code | repo_name | path | language | license | size |
---|---|---|---|---|---|
stringlengths 5-1M | stringlengths 5-109 | stringlengths 6-208 | stringclasses 1 value | stringclasses 15 values | int64 5-1M |
package 练习22
object Runner {
def main(arr: Array[String]): Unit = {
// Next time, try 3数相乘.乘(t1, t2, t3)
// Next time, try a multiplier of 0
val 乘数1 = 乘数T1.fromInt(47)
val 乘数2 = 乘数T2.fromInt(31)
val 乘数3 = 乘数T3.fromInt(23)
println(s"Type value: ${乘数3.乘3(乘数1, 乘数2).length}")
println(s"Count value: ${47 * 31 * 23}")
}
}
|
djx314/ubw
|
a28-练习/src/main/scala/练习22/Runner.scala
|
Scala
|
bsd-3-clause
| 393 |
package com.sksamuel.scapegoat.inspections.math
import com.sksamuel.scapegoat.PluginRunner
import org.scalatest.{ FreeSpec, Matchers }
/** @author Matic Potočnik */
class UseExpM1Test extends FreeSpec with Matchers with PluginRunner {
override val inspections = Seq(new UseExpM1)
"using exp(x) - 1 instead of expm1(x)" - {
"should report warning" in {
val code = """object Test {
val a = 2d
math.exp(a) - 1
Math.exp(a) - 1
StrictMath.exp(a) - 1
} """.stripMargin
compileCodeSnippet(code)
compiler.scapegoat.feedback.warnings.size shouldBe 3
}
}
}
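// Editor's note (hedged, not part of the original test file): the inspection
// flags `exp(x) - 1` because the subtraction cancels catastrophically when x is
// close to zero; the intended replacement is the dedicated method, e.g.
//   math.expm1(a)         // instead of math.exp(a) - 1
//   StrictMath.expm1(a)   // instead of StrictMath.exp(a) - 1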
|
pwwpche/scalac-scapegoat-plugin
|
src/test/scala/com/sksamuel/scapegoat/inspections/math/UseExpM1Test.scala
|
Scala
|
apache-2.0
| 702 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.fs.mount.cache
import slamdata.Predef._
import quasar.contrib.pathy.AFile
import quasar.fs.mount.MountConfig
import java.time.{Duration => JDuration, Instant}
import scala.concurrent.duration._
import scalaz._, Scalaz._
final case class ViewCache(
viewConfig: MountConfig.ViewConfig,
lastUpdate: Option[Instant],
executionMillis: Option[Long],
cacheReads: Int,
assignee: Option[String],
assigneeStart: Option[Instant],
maxAgeSeconds: Long,
refreshAfter: Instant,
status: ViewCache.Status,
errorMsg: Option[String],
dataFile: AFile,
tmpDataFile: Option[AFile])
object ViewCache {
sealed trait Status
object Status {
final case object Pending extends Status
final case object Successful extends Status
final case object Failed extends Status
implicit val equal: Equal[Status] = Equal.equalRef
}
// Hard coded to 80% of maxAge for now
def expireAt(ts: Instant, maxAge: Duration): Throwable \/ Instant =
\/.fromTryCatchNonFatal(ts.plus(JDuration.ofMillis((maxAge.toMillis.toDouble * 0.8).toLong)))
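// Worked example (editor's illustration, not part of the original file): with
// maxAge = 100.seconds this adds 80% of 100,000 ms, i.e. 80,000 ms, so a view
// refreshed at 12:00:00 is scheduled to expire at 12:01:20.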
implicit val equal: Equal[ViewCache] = {
implicit val equalInstant: Equal[Instant] = Equal.equalA
Equal.equal {
case (ViewCache(l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12),
ViewCache(r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12)) =>
(l1: MountConfig) ≟ (r1: MountConfig) && l2 ≟ r2 && l3 ≟ r3 && l4 ≟ r4 && l5 ≟ r5 && l6 ≟ r6 &&
l7 ≟ r7 && l8 ≟ r8 && l9 ≟ r9 && l10 ≟ r10 && l11 ≟ r11 && l12 ≟ r12
}
}
implicit val show: Show[ViewCache] = Show.showFromToString
}
|
drostron/quasar
|
core/src/main/scala/quasar/fs/mount/cache/ViewCache.scala
|
Scala
|
apache-2.0
| 2,236 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.businessactivities
import connectors.DataCacheConnector
import controllers.actions.SuccessfulAuthAction
import models.businessactivities._
import org.jsoup.Jsoup
import org.jsoup.nodes.Document
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.concurrent.ScalaFutures
import org.scalatestplus.mockito.MockitoSugar
import utils.AmlsSpec
import play.api.i18n.Messages
import play.api.libs.json.Json
import play.api.test.Helpers._
import uk.gov.hmrc.http.cache.client.CacheMap
import views.html.businessactivities.tax_matters
import scala.concurrent.Future
class TaxMattersControllerSpec extends AmlsSpec with MockitoSugar with ScalaFutures{
trait Fixture {
self => val request = addToken(authRequest)
lazy val view = app.injector.instanceOf[tax_matters]
val controller = new TaxMattersController (
dataCacheConnector = mock[DataCacheConnector],
SuccessfulAuthAction,
ds = commonDependencies,
cc = mockMcc,
tax_matters = view,
errorView)
}
"TaxMattersController" when {
"get is called" must {
"display the 'Manage Your Tax Affairs?' page with an empty form" in new Fixture {
when(controller.dataCacheConnector.fetch[BusinessActivities](any(), any())(any(), any()))
.thenReturn(Future.successful(Some(BusinessActivities(
whoIsYourAccountant = Some(WhoIsYourAccountant(
Some(WhoIsYourAccountantName("Accountant name", accountantsTradingName = None)),
Some(WhoIsYourAccountantIsUk(true)),
Some(UkAccountantsAddress("", "", None, None, ""))))))))
val result = controller.get()(request)
status(result) must be(OK)
val page = Jsoup.parse(contentAsString(result))
page.getElementById("manageYourTaxAffairs-true").hasAttr("checked") must be(false)
page.getElementById("manageYourTaxAffairs-false").hasAttr("checked") must be(false)
}
"display the 'Manage Your Tax Affairs?' page with pre populated data if found in cache" in new Fixture {
when(controller.dataCacheConnector.fetch[BusinessActivities](any(), any())(any(), any()))
.thenReturn(Future.successful(Some(BusinessActivities(taxMatters = Some(TaxMatters(true)),
whoIsYourAccountant = Some(WhoIsYourAccountant(
Some(WhoIsYourAccountantName("Accountant name", accountantsTradingName = None)),
Some(WhoIsYourAccountantIsUk(true)),
Some(UkAccountantsAddress("", "", None, None, ""))))))))
val result = controller.get()(request)
status(result) must be(OK)
val page = Jsoup.parse(contentAsString(result))
page.getElementById("manageYourTaxAffairs-true").hasAttr("checked") must be(true)
page.getElementById("manageYourTaxAffairs-false").hasAttr("checked") must be(false)
}
}
"post is called" must {
"redirect to Check Your Answers on post with valid data" in new Fixture {
val newRequest = requestWithUrlEncodedBody(
"manageYourTaxAffairs" -> "true"
)
when(controller.dataCacheConnector.fetch[BusinessActivities](any(), any())(any(), any()))
.thenReturn(Future.successful(None))
when(controller.dataCacheConnector.save[BusinessActivities](any(), any(), any())(any(), any()))
.thenReturn(Future.successful(CacheMap(BusinessActivities.key, Map("" -> Json.obj()))))
val result = controller.post()(newRequest)
status(result) must be(SEE_OTHER)
redirectLocation(result) must be(Some(routes.SummaryController.get.url))
}
"respond with Bad Request on post with invalid data" in new Fixture {
val accountantsName = "Accountant name"
when(controller.dataCacheConnector.fetch[BusinessActivities](any(), any())(any(), any()))
.thenReturn(Future.successful(Some(BusinessActivities(
whoIsYourAccountant = Some(
WhoIsYourAccountant(
Some(WhoIsYourAccountantName(accountantsName, None)),
None,
None))))))
val newRequest = requestWithUrlEncodedBody(
"manageYourTaxAffairs" -> "grrrrr"
)
val result = controller.post()(newRequest)
status(result) must be(BAD_REQUEST)
val document: Document = Jsoup.parse(contentAsString(result))
document.select("span").html() must include(Messages("error.required.ba.tax.matters", accountantsName))
}
"redirect to Check Your Answers on post with valid data in edit mode" in new Fixture {
val newRequest = requestWithUrlEncodedBody(
"manageYourTaxAffairs" -> "true"
)
when(controller.dataCacheConnector.fetch[BusinessActivities](any(), any())(any(), any()))
.thenReturn(Future.successful(None))
when(controller.dataCacheConnector.save[BusinessActivities](any(), any(), any())(any(), any()))
.thenReturn(Future.successful(CacheMap(BusinessActivities.key, Map("" -> Json.obj()))))
val result = controller.post(true)(newRequest)
status(result) must be(SEE_OTHER)
redirectLocation(result) must be(Some(routes.SummaryController.get.url))
}
}
}
}
|
hmrc/amls-frontend
|
test/controllers/businessactivities/TaxMattersControllerSpec.scala
|
Scala
|
apache-2.0
| 5,865 |
package com.github.bespalovdn.asteriskscala.common.protocol
object AsteriskFormatter
{
implicit class EscapeAndQuoteString(value: String){
def escaped: String = escapeAndQuote(value)
}
implicit class EscapeAndQuoteTraversable(value: Traversable[String]){
def escaped: String = escapeAndQuote(value)
}
protected def escapeAndQuote(value: String): String = value match {
case null => "\"\""
case str => "\"%s\"" format str.
replaceAll("""\\""", """\\\\""").
replaceAll("""\"""", """\\"""").
replaceAll("\\\n", "")
}
protected def escapeAndQuote(values: Traversable[String]): String = values match {
case null => escapeAndQuote(null: String)
case vs =>
val str = vs.map{_.replaceAll(",", """\\,""")}.mkString(",")
escapeAndQuote(str)
}
}
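// Usage sketch (editor's illustration, assuming `import AsteriskFormatter._` so
// the implicit classes above are in scope):
//   "plan".escaped            // wraps the value in double quotes, escaping any
//                             // backslashes, embedded quotes and line breaks
//   Seq("a", "b,c").escaped   // escapes the comma in "b,c", joins with "," and quotes the result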
|
bespalovdn/asterisk-scala
|
common/src/main/scala/com/github/bespalovdn/asteriskscala/common/protocol/AsteriskFormatter.scala
|
Scala
|
mit
| 879 |
package com.lucidchart.open.cashy.uploaders
import com.lucidchart.open.cashy.amazons3.S3Client
import com.lucidchart.open.cashy.models.{Asset, User, AssetModel}
import com.lucidchart.open.cashy.utils.JsCompress
import com.lucidchart.open.cashy.config.UploadFeatureConfig
import scala.collection.mutable.MutableList
object JsUploader extends JsUploader
class JsUploader extends Uploader with UploadFeatureConfig {
override def upload(bytes: Array[Byte], contentType: Option[String], user: User, data: UploadFormSubmission): UploadResult = {
val bucket = data.bucket
val assetName = data.assetName
val uploadedAssets = MutableList[Tuple2[String,Asset]]()
val existingAssets = MutableList[Tuple2[String,Asset]]()
// Upload the asset
val asset = uploadAndAudit(bytes, bucket, assetName, contentType, user)
if(checkMinified(assetName)) {
uploadedAssets += (("Minified", asset))
} else {
uploadedAssets += (("Original", asset))
if(uploadFeatures.compressJsEnabled) {
val extension = getExtension(assetName)
val minAssetName = assetName.substring(0, assetName.toLowerCase.lastIndexOf("." + extension.toLowerCase)) + ".min." + extension
// Make sure a min version with this name does not already exist
if(!S3Client.existsInS3(bucket, minAssetName)) {
val (minBytes, compressErrors) = JsCompress.compress(bytes)
if (compressErrors.size > 0) {
throw new UploadFailedException("Minifying javascript failed: " + (bucket, assetName) + "\n" + compressErrors.mkString("\n"))
}
val minAsset = uploadAndAudit(minBytes, bucket, minAssetName, contentType, user)
uploadedAssets += (("Minified", minAsset))
} else {
// A min version already exists, find it in cashy and return as an existing asset
val minAsset = AssetModel.findByKey(bucket, minAssetName).get
existingAssets += (("Minified", minAsset))
}
}
}
UploadResult(
uploadedAssets.toList,
existingAssets.toList,
asset.bucket,
asset.parent
)
}
}
|
lucidsoftware/cashy
|
app/com/lucidchart/open/cashy/uploaders/JsUploader.scala
|
Scala
|
apache-2.0
| 2,252 |
package io.github.suitougreentea.VariousMinos
/**
* Created by suitougreentea on 14/12/04.
*/
class Timer {
var stackTime: Long = 0
var startTime: Long = -1
var stopped = true
def start(): Unit = {
startTime = System.currentTimeMillis()
stopped = false
}
def stop(): Unit = {
if(startTime != -1) stackTime += System.currentTimeMillis() - startTime
stopped = true
}
def time = if(stopped) stackTime else stackTime + System.currentTimeMillis() - startTime
def mkString(formatter: String) = formatter.format(time / (60 * 1000), time % (60 * 1000) / 1000, time % 1000 / 10, time % 1000)
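// Editor's note (hedged): the formatter receives minutes, whole seconds,
// centiseconds and milliseconds in that order, so for example
// mkString("%d:%02d.%02d") renders a time of 83,456 ms as "1:23.45".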
}
|
suitougreentea/VariousMinos2
|
src/main/scala/io/github/suitougreentea/VariousMinos/Timer.scala
|
Scala
|
mit
| 623 |
package caustic.compiler.util
/**
* An indentation preserving string context. Standard string interpolation does not preserve
* indentation; s" $x" will indent only the first line of x and not subsequent lines. Indenters
* apply the same indentation to all lines of x; i" $x" will indent all lines of x by 2.
*
* @see https://stackoverflow.com/a/11426477/1447029
* @param context String context.
*/
case class Indenter(context: StringContext) {
def i(args: Any*): String = {
val builder = new StringBuilder()
val parts = context.parts.map(_.stripMargin)
(parts zip args) foreach { case (part, arg) =>
builder.append(part)
val whitespace = builder.substring(builder.lastIndexOf("\n") + 1)
val indent = if (whitespace.trim.isEmpty) whitespace.length else 0
builder.append(arg.toString.replaceAll("\n(?!$)", "\n" + " " * indent))
}
if (parts.size > args.size) builder.append(parts.last).toString else builder.toString
}
}
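// Usage sketch (editor's illustration, not part of the original file): assuming
// an implicit conversion such as
//   implicit def indenter(sc: StringContext): Indenter = Indenter(sc)
// is in scope, multi-line arguments stay aligned with their insertion point:
//   val body = "line1\nline2"
//   i"  $body"   // "  line1\n  line2", whereas s"  $body" indents only line1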
|
ashwin153/caustic
|
caustic-compiler/src/main/scala/caustic/compiler/util/Indenter.scala
|
Scala
|
apache-2.0
| 980 |
package at.logic.gapt.proofs.sketch
import at.logic.gapt.expr.{ FOLAtom, Atom, clauseSubsumption }
import at.logic.gapt.proofs.resolution._
import at.logic.gapt.proofs.{ FOLClause, HOLClause, SequentConnector, SequentProof }
import at.logic.gapt.provers.ResolutionProver
import at.logic.gapt.provers.escargot.{ Escargot, NonSplittingEscargot }
import at.logic.gapt.provers.sat.Sat4j
import scala.collection.mutable
import cats.instances.all._
import cats.syntax.all._
/**
* Intermediate data structure intended for the proof replay in the TPTP proof import.
*
* A refutation sketch is a list of clauses, where each clause is either an axiom (that occurs in the CNF of the
* original end-sequent) or is a first-order consequence of previous ones.
*
* These two cases are modelled as [[SketchAxiom]] and [[SketchInference]].
*/
sealed trait RefutationSketch extends SequentProof[FOLAtom, RefutationSketch] {
override def occConnectors = immediateSubProofs map { p => SequentConnector( conclusion, p.conclusion, p.conclusion map { _ => Seq() } ) }
override def mainIndices = Seq()
override def auxIndices = immediateSubProofs map { _ => Seq() }
}
/**
* Axiom in a refutation sketch.
*
* The clause [[axiom]] occurs as a clause in the CNF of the end-sequent we're proving.
*
* @param axiom Clause of the CNF.
*/
case class SketchAxiom( axiom: FOLClause ) extends RefutationSketch {
override def conclusion = axiom
override def immediateSubProofs: Seq[RefutationSketch] = Seq()
}
/**
* Inference in a refutation sketch.
*
* The clause [[conclusion]] should be a first-order consequence of the conclusions of [[from]].
*
* This rule corresponds to a line in a TPTP proof which just indicates the previous lines from which it follows,
* but does not specify the precise inference rule employed.
*
* @param conclusion Conclusion of the inference.
* @param from Premises of the inference.
*/
case class SketchInference( conclusion: FOLClause, from: Seq[RefutationSketch] ) extends RefutationSketch {
override def immediateSubProofs = from
override def productArity = 1 + from.size
override def productElement( n: Int ) = if ( n == 0 ) conclusion else from( n - 1 )
}
case class SketchComponentIntro( component: AvatarDefinition ) extends RefutationSketch {
def immediateSubProofs = Seq()
def conclusion = component.clause.map( _.asInstanceOf[FOLAtom] )
}
case class SketchComponentElim( subProof: RefutationSketch, component: AvatarDefinition ) extends RefutationSketch {
def immediateSubProofs = Seq( subProof )
val conclusion = subProof.conclusion diff component.clause
}
case class SketchSplitCombine( splitCases: Seq[RefutationSketch] ) extends RefutationSketch {
for ( p <- splitCases ) require( p.conclusion.isEmpty, p )
override def immediateSubProofs = splitCases
override def conclusion = FOLClause()
override def productArity = splitCases.size
override def productElement( n: Int ) = splitCases( n )
}
case class UnprovableSketchInference( inference: RefutationSketch ) {
override def toString = s"\nCannot prove\n${inference.conclusion}\n\nfrom\n\n${inference.immediateSubProofs.map( _.conclusion ).mkString( "\n\n" )}\n"
}
object RefutationSketchToResolution {
/**
* Converts a refutation sketch to a resolution proof.
*
* Each [[SketchInference]] is replaced by a resolution derivation that is obtained
* using the provided resolution prover.
*
* @param sketch Refutation sketch to convert.
* @param prover Resolution prover used to reconstruct the inferences.
* @return <code>Right(proof)</code> if all inferences could be reconstructed, <code>Left(inference)</code> identifying the failed inference otherwise.
*/
def apply( sketch: RefutationSketch, prover: ResolutionProver = NonSplittingEscargot ): Either[UnprovableSketchInference, ResolutionProof] = {
type ErrorOr[X] = Either[UnprovableSketchInference, X]
val memo = mutable.Map[RefutationSketch, ErrorOr[ResolutionProof]]()
def findDerivation( a: FOLClause, bs: List[ResolutionProof] ): Option[ResolutionProof] = {
for ( b <- bs; s <- clauseSubsumption( b.conclusion, a ) ) return Some( Subst.ifNecessary( b, s ) )
findDerivationViaResolution( a, bs.map( _.conclusion.asInstanceOf[HOLClause] ).toSet, prover ).
map( mapInputClauses( _ )( bs.map { p => p.conclusion -> p }.toMap ) )
}
def solve( s: RefutationSketch ): ErrorOr[ResolutionProof] = memo.getOrElseUpdate( s, s match {
case SketchAxiom( axiom ) => Right( Input( axiom ) )
case s @ SketchInference( conclusion, from ) =>
for {
solvedFrom <- from.toList.traverse( solve )
deriv <- findDerivation( s.conclusion, solvedFrom ).map { Right( _ ) }.
getOrElse { Left( UnprovableSketchInference( s ) ) }
} yield deriv
case SketchSplitCombine( cases ) =>
cases.toList.traverse( solve ).flatMap { solvedCases =>
solvedCases.find( p => p.conclusion.isEmpty && p.assertions.isEmpty ).
orElse( Sat4j.getResolutionProof( solvedCases.map( AvatarContradiction( _ ) ) ) ).
orElse( NonSplittingEscargot.getResolutionProof( solvedCases.map( AvatarContradiction( _ ) ) ) ).
map( Right( _ ) ).getOrElse( Left( UnprovableSketchInference( s ) ) )
}
case SketchComponentElim( from, comp ) =>
for ( solvedFrom <- solve( from ) )
yield AvatarSplit( solvedFrom, comp )
case SketchComponentIntro( comp ) =>
Right( AvatarComponent( comp ) )
} )
solve( sketch ) map { simplifyResolutionProof( _ ) }
}
}
|
gebner/gapt
|
core/src/main/scala/at/logic/gapt/proofs/sketch/refutationSketch.scala
|
Scala
|
gpl-3.0
| 5,571 |
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package scalaguide.xml.scalaxmlrequests {
import play.api.test._
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
import play.api.Application
import play.api.mvc._
import play.api.mvc.Results._
import scala.xml.NodeSeq
@RunWith(classOf[JUnitRunner])
class ScalaXmlRequestsSpec extends PlaySpecification {
private def parse(implicit app: Application) = app.injector.instanceOf(classOf[PlayBodyParsers])
private def Action[A](block: Request[AnyContent] => Result)(implicit app: Application) = app.injector.instanceOf(classOf[DefaultActionBuilder]).apply(block)
private def Action(bodyParser: BodyParser[NodeSeq])(block: Request[NodeSeq] => Result)(implicit app: Application) = app.injector.instanceOf(classOf[DefaultActionBuilder])(parse.xml).apply(block)
"A scala XML request" should {
"request body as xml" in new WithApplication {
//#xml-request-body-asXml
def sayHello = Action { request =>
request.body.asXml.map { xml =>
(xml \\\\ "name" headOption).map(_.text).map { name =>
Ok("Hello " + name)
}.getOrElse {
BadRequest("Missing parameter [name]")
}
}.getOrElse {
BadRequest("Expecting Xml data")
}
}
//#xml-request-body-asXml
private val request = FakeRequest().withXmlBody(<name>XF</name>).map(_.xml)
status(call(sayHello, request)) must beEqualTo(Helpers.OK)
}
"request body as xml body parser" in new WithApplication {
//#xml-request-body-parser
def sayHello = Action(parse.xml) { request =>
(request.body \\ "name" headOption).map(_.text).map { name =>
Ok("Hello " + name)
}.getOrElse {
BadRequest("Missing parameter [name]")
}
}
//#xml-request-body-parser
private val request = FakeRequest().withXmlBody(<name>XF</name>).map(_.xml)
status(call(sayHello, request)) must beEqualTo(Helpers.OK)
}
"request body as xml body parser and xml response" in new WithApplication {
//#xml-request-body-parser-xml-response
def sayHello = Action(parse.xml) { request =>
(request.body \\ "name" headOption).map(_.text).map { name =>
Ok(<message status="OK">Hello
{name}
</message>)
}.getOrElse {
BadRequest(<message status="KO">Missing parameter [name]</message>)
}
}
//#xml-request-body-parser-xml-response
private val request = FakeRequest().withXmlBody(<name>XF</name>).map(_.xml)
status(call(sayHello, request)) must beEqualTo(Helpers.OK)
}
}
}
}
|
Shenker93/playframework
|
documentation/manual/working/scalaGuide/main/xml/code/ScalaXmlRequests.scala
|
Scala
|
apache-2.0
| 2,819 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.coordinator.transaction
import kafka.common.KafkaException
import kafka.zk.KafkaZkClient
import org.easymock.{Capture, EasyMock, IAnswer}
import org.junit.{After, Test}
import org.junit.Assert._
class ProducerIdManagerTest {
private val zkClient = EasyMock.createNiceMock(classOf[KafkaZkClient])
@After
def tearDown(): Unit = {
EasyMock.reset(zkClient)
}
@Test
def testGetProducerId() {
var zkVersion: Option[Int] = None
var data: Array[Byte] = null
EasyMock.expect(zkClient.getDataAndVersion(EasyMock.anyString)).andAnswer(new IAnswer[(Option[Array[Byte]], Int)] {
override def answer(): (Option[Array[Byte]], Int) = zkVersion.map(Some(data) -> _).getOrElse(None, 0)
}).anyTimes()
val capturedVersion: Capture[Int] = EasyMock.newCapture()
val capturedData: Capture[Array[Byte]] = EasyMock.newCapture()
EasyMock.expect(zkClient.conditionalUpdatePath(EasyMock.anyString(),
EasyMock.capture(capturedData),
EasyMock.capture(capturedVersion),
EasyMock.anyObject[Option[(KafkaZkClient, String, Array[Byte]) => (Boolean, Int)]])).andAnswer(new IAnswer[(Boolean, Int)] {
override def answer(): (Boolean, Int) = {
val newZkVersion = capturedVersion.getValue + 1
zkVersion = Some(newZkVersion)
data = capturedData.getValue
(true, newZkVersion)
}
}).anyTimes()
EasyMock.replay(zkClient)
val manager1 = new ProducerIdManager(0, zkClient)
val manager2 = new ProducerIdManager(1, zkClient)
val pid1 = manager1.generateProducerId()
val pid2 = manager2.generateProducerId()
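// Editor's note (hedged): each ProducerIdManager claims its own contiguous block
// of PidBlockSize ids from ZooKeeper, so manager1 starts at 0 and manager2 at
// PidBlockSize; the assertions below exhaust both blocks and then check the
// next blocks that each manager claims afterwards.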
assertEquals(0, pid1)
assertEquals(ProducerIdManager.PidBlockSize, pid2)
for (i <- 1L until ProducerIdManager.PidBlockSize)
assertEquals(pid1 + i, manager1.generateProducerId())
for (i <- 1L until ProducerIdManager.PidBlockSize)
assertEquals(pid2 + i, manager2.generateProducerId())
assertEquals(pid2 + ProducerIdManager.PidBlockSize, manager1.generateProducerId())
assertEquals(pid2 + ProducerIdManager.PidBlockSize * 2, manager2.generateProducerId())
}
@Test(expected = classOf[KafkaException])
def testExceedProducerIdLimit() {
EasyMock.expect(zkClient.getDataAndVersion(EasyMock.anyString)).andAnswer(new IAnswer[(Option[Array[Byte]], Int)] {
override def answer(): (Option[Array[Byte]], Int) = {
val json = ProducerIdManager.generateProducerIdBlockJson(
ProducerIdBlock(0, Long.MaxValue - ProducerIdManager.PidBlockSize, Long.MaxValue))
(Some(json), 0)
}
}).anyTimes()
EasyMock.replay(zkClient)
new ProducerIdManager(0, zkClient)
}
}
|
MyPureCloud/kafka
|
core/src/test/scala/unit/kafka/coordinator/transaction/ProducerIdManagerTest.scala
|
Scala
|
apache-2.0
| 3,474 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.controller
import collection._
import collection.JavaConversions
import collection.mutable.Buffer
import java.util.concurrent.atomic.AtomicBoolean
import kafka.api.LeaderAndIsr
import kafka.common.{LeaderElectionNotNeededException, TopicAndPartition, StateChangeFailedException, NoReplicaOnlineException}
import kafka.utils.{Logging, ZkUtils}
import org.I0Itec.zkclient.{IZkDataListener, IZkChildListener}
import org.I0Itec.zkclient.exception.ZkNodeExistsException
import org.apache.log4j.Logger
import kafka.controller.Callbacks.CallbackBuilder
import kafka.utils.Utils._
/**
* This class represents the state machine for partitions. It defines the states that a partition can be in, and
* transitions to move the partition to another legal state. The different states that a partition can be in are -
* 1. NonExistentPartition: This state indicates that the partition was either never created or was created and then
* deleted. Valid previous state, if one exists, is OfflinePartition
* 2. NewPartition : After creation, the partition is in the NewPartition state. In this state, the partition should have
* replicas assigned to it, but no leader/isr yet. Valid previous states are NonExistentPartition
* 3. OnlinePartition : Once a leader is elected for a partition, it is in the OnlinePartition state.
* Valid previous states are NewPartition/OfflinePartition
* 4. OfflinePartition : If, after successful leader election, the leader for partition dies, then the partition
* moves to the OfflinePartition state. Valid previous states are NewPartition/OnlinePartition
*/
class PartitionStateMachine(controller: KafkaController) extends Logging {
private val controllerContext = controller.controllerContext
private val controllerId = controller.config.brokerId
private val zkClient = controllerContext.zkClient
var partitionState: mutable.Map[TopicAndPartition, PartitionState] = mutable.Map.empty
val brokerRequestBatch = new ControllerBrokerRequestBatch(controller)
private val hasStarted = new AtomicBoolean(false)
private val noOpPartitionLeaderSelector = new NoOpLeaderSelector(controllerContext)
this.logIdent = "[Partition state machine on Controller " + controllerId + "]: "
private val stateChangeLogger = KafkaController.stateChangeLogger
private var topicChangeListener: TopicChangeListener = null
private var deleteTopicsListener: DeleteTopicsListener = null
private var addPartitionsListener: mutable.Map[String, AddPartitionsListener] = mutable.Map.empty
/**
* Invoked on successful controller election. First registers a topic change listener since that triggers all
* state transitions for partitions. Initializes the state of partitions by reading from zookeeper. Then triggers
* the OnlinePartition state change for all new or offline partitions.
*/
def startup() {
// initialize partition state
initializePartitionState()
hasStarted.set(true)
// try to move partitions to online state
triggerOnlinePartitionStateChange()
info("Started partition state machine with initial state -> " + partitionState.toString())
}
// register topic and partition change listeners
def registerListeners() {
registerTopicChangeListener()
if(controller.config.deleteTopicEnable)
registerDeleteTopicListener()
}
/**
* Invoked on controller shutdown.
*/
def shutdown() {
hasStarted.set(false)
partitionState.clear()
}
/**
* This API invokes the OnlinePartition state change on all partitions in either the NewPartition or OfflinePartition
* state. This is called on a successful controller election and on broker changes
*/
def triggerOnlinePartitionStateChange() {
try {
brokerRequestBatch.newBatch()
// try to move all partitions in NewPartition or OfflinePartition state to OnlinePartition state except partitions
// that belong to topics to be deleted
for((topicAndPartition, partitionState) <- partitionState
if(!controller.deleteTopicManager.isTopicQueuedUpForDeletion(topicAndPartition.topic))) {
if(partitionState.equals(OfflinePartition) || partitionState.equals(NewPartition))
handleStateChange(topicAndPartition.topic, topicAndPartition.partition, OnlinePartition, controller.offlinePartitionSelector,
(new CallbackBuilder).build)
}
brokerRequestBatch.sendRequestsToBrokers(controller.epoch, controllerContext.correlationId.getAndIncrement)
} catch {
case e: Throwable => error("Error while moving some partitions to the online state", e)
// TODO: It is not enough to bail out and log an error, it is important to trigger leader election for those partitions
}
}
def partitionsInState(state: PartitionState): Set[TopicAndPartition] = {
partitionState.filter(p => p._2 == state).keySet
}
/**
* This API is invoked by the partition change zookeeper listener
* @param partitions The list of partitions that need to be transitioned to the target state
* @param targetState The state that the partitions should be moved to
*/
def handleStateChanges(partitions: Set[TopicAndPartition], targetState: PartitionState,
leaderSelector: PartitionLeaderSelector = noOpPartitionLeaderSelector,
callbacks: Callbacks = (new CallbackBuilder).build) {
info("Invoking state change to %s for partitions %s".format(targetState, partitions.mkString(",")))
try {
brokerRequestBatch.newBatch()
partitions.foreach { topicAndPartition =>
handleStateChange(topicAndPartition.topic, topicAndPartition.partition, targetState, leaderSelector, callbacks)
}
brokerRequestBatch.sendRequestsToBrokers(controller.epoch, controllerContext.correlationId.getAndIncrement)
}catch {
case e: Throwable => error("Error while moving some partitions to %s state".format(targetState), e)
// TODO: It is not enough to bail out and log an error, it is important to trigger state changes for those partitions
}
}
/**
* This API exercises the partition's state machine. It ensures that every state transition happens from a legal
* previous state to the target state. Valid state transitions are:
* NonExistentPartition -> NewPartition:
* --load assigned replicas from ZK to controller cache
*
* NewPartition -> OnlinePartition
* --assign first live replica as the leader and all live replicas as the isr; write leader and isr to ZK for this partition
* --send LeaderAndIsr request to every live replica and UpdateMetadata request to every live broker
*
* OnlinePartition,OfflinePartition -> OnlinePartition
* --select new leader and isr for this partition and a set of replicas to receive the LeaderAndIsr request, and write leader and isr to ZK
* --for this partition, send LeaderAndIsr request to every receiving replica and UpdateMetadata request to every live broker
*
* NewPartition,OnlinePartition,OfflinePartition -> OfflinePartition
* --nothing other than marking partition state as Offline
*
* OfflinePartition -> NonExistentPartition
* --nothing other than marking the partition state as NonExistentPartition
* @param topic The topic of the partition for which the state transition is invoked
* @param partition The partition for which the state transition is invoked
* @param targetState The end state that the partition should be moved to
*/
private def handleStateChange(topic: String, partition: Int, targetState: PartitionState,
leaderSelector: PartitionLeaderSelector,
callbacks: Callbacks) {
val topicAndPartition = TopicAndPartition(topic, partition)
if (!hasStarted.get)
throw new StateChangeFailedException(("Controller %d epoch %d initiated state change for partition %s to %s failed because " +
"the partition state machine has not started")
.format(controllerId, controller.epoch, topicAndPartition, targetState))
val currState = partitionState.getOrElseUpdate(topicAndPartition, NonExistentPartition)
try {
targetState match {
case NewPartition =>
// pre: partition did not exist before this
assertValidPreviousStates(topicAndPartition, List(NonExistentPartition), NewPartition)
assignReplicasToPartitions(topic, partition)
partitionState.put(topicAndPartition, NewPartition)
val assignedReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition).mkString(",")
stateChangeLogger.trace("Controller %d epoch %d changed partition %s state from %s to %s with assigned replicas %s"
.format(controllerId, controller.epoch, topicAndPartition, currState, targetState,
assignedReplicas))
// post: partition has been assigned replicas
case OnlinePartition =>
assertValidPreviousStates(topicAndPartition, List(NewPartition, OnlinePartition, OfflinePartition), OnlinePartition)
partitionState(topicAndPartition) match {
case NewPartition =>
// initialize leader and isr path for new partition
initializeLeaderAndIsrForPartition(topicAndPartition)
case OfflinePartition =>
electLeaderForPartition(topic, partition, leaderSelector)
case OnlinePartition => // invoked when the leader needs to be re-elected
electLeaderForPartition(topic, partition, leaderSelector)
case _ => // should never come here since illegal previous states are checked above
}
partitionState.put(topicAndPartition, OnlinePartition)
val leader = controllerContext.partitionLeadershipInfo(topicAndPartition).leaderAndIsr.leader
stateChangeLogger.trace("Controller %d epoch %d changed partition %s from %s to %s with leader %d"
.format(controllerId, controller.epoch, topicAndPartition, currState, targetState, leader))
// post: partition has a leader
case OfflinePartition =>
// pre: partition should be in New or Online state
assertValidPreviousStates(topicAndPartition, List(NewPartition, OnlinePartition, OfflinePartition), OfflinePartition)
// should be called when the leader for a partition is no longer alive
stateChangeLogger.trace("Controller %d epoch %d changed partition %s state from %s to %s"
.format(controllerId, controller.epoch, topicAndPartition, currState, targetState))
partitionState.put(topicAndPartition, OfflinePartition)
// post: partition has no alive leader
case NonExistentPartition =>
// pre: partition should be in Offline state
assertValidPreviousStates(topicAndPartition, List(OfflinePartition), NonExistentPartition)
stateChangeLogger.trace("Controller %d epoch %d changed partition %s state from %s to %s"
.format(controllerId, controller.epoch, topicAndPartition, currState, targetState))
partitionState.put(topicAndPartition, NonExistentPartition)
// post: partition state is deleted from all brokers and zookeeper
}
} catch {
case t: Throwable =>
stateChangeLogger.error("Controller %d epoch %d initiated state change for partition %s from %s to %s failed"
.format(controllerId, controller.epoch, topicAndPartition, currState, targetState), t)
}
}
/**
* Invoked on startup of the partition's state machine to set the initial state for all existing partitions in
* zookeeper
*/
private def initializePartitionState() {
for((topicPartition, replicaAssignment) <- controllerContext.partitionReplicaAssignment) {
// check if leader and isr path exists for partition. If not, then it is in NEW state
controllerContext.partitionLeadershipInfo.get(topicPartition) match {
case Some(currentLeaderIsrAndEpoch) =>
// else, check if the leader for partition is alive. If yes, it is in Online state, else it is in Offline state
controllerContext.liveBrokerIds.contains(currentLeaderIsrAndEpoch.leaderAndIsr.leader) match {
case true => // leader is alive
partitionState.put(topicPartition, OnlinePartition)
case false =>
partitionState.put(topicPartition, OfflinePartition)
}
case None =>
partitionState.put(topicPartition, NewPartition)
}
}
}
private def assertValidPreviousStates(topicAndPartition: TopicAndPartition, fromStates: Seq[PartitionState],
targetState: PartitionState) {
if(!fromStates.contains(partitionState(topicAndPartition)))
throw new IllegalStateException("Partition %s should be in the %s states before moving to %s state"
.format(topicAndPartition, fromStates.mkString(","), targetState) + ". Instead it is in %s state"
.format(partitionState(topicAndPartition)))
}
/**
* Invoked on the NonExistentPartition->NewPartition state transition to update the controller's cache with the
* partition's replica assignment.
* @param topic The topic of the partition whose replica assignment is to be cached
* @param partition The partition whose replica assignment is to be cached
*/
private def assignReplicasToPartitions(topic: String, partition: Int) {
val assignedReplicas = ZkUtils.getReplicasForPartition(controllerContext.zkClient, topic, partition)
controllerContext.partitionReplicaAssignment += TopicAndPartition(topic, partition) -> assignedReplicas
}
/**
* Invoked on the NewPartition->OnlinePartition state change. When a partition is in the New state, it does not have
* a leader and isr path in zookeeper. Once the partition moves to the OnlinePartition state, it's leader and isr
* path gets initialized and it never goes back to the NewPartition state. From here, it can only go to the
* OfflinePartition state.
* @param topicAndPartition The topic/partition whose leader and isr path is to be initialized
*/
private def initializeLeaderAndIsrForPartition(topicAndPartition: TopicAndPartition) {
val replicaAssignment = controllerContext.partitionReplicaAssignment(topicAndPartition)
val liveAssignedReplicas = replicaAssignment.filter(r => controllerContext.liveBrokerIds.contains(r))
liveAssignedReplicas.size match {
case 0 =>
val failMsg = ("encountered error during state change of partition %s from New to Online, assigned replicas are [%s], " +
"live brokers are [%s]. No assigned replica is alive.")
.format(topicAndPartition, replicaAssignment.mkString(","), controllerContext.liveBrokerIds)
stateChangeLogger.error("Controller %d epoch %d ".format(controllerId, controller.epoch) + failMsg)
throw new StateChangeFailedException(failMsg)
case _ =>
debug("Live assigned replicas for partition %s are: [%s]".format(topicAndPartition, liveAssignedReplicas))
// make the first replica in the list of assigned replicas, the leader
val leader = liveAssignedReplicas.head
val leaderIsrAndControllerEpoch = new LeaderIsrAndControllerEpoch(new LeaderAndIsr(leader, liveAssignedReplicas.toList),
controller.epoch)
debug("Initializing leader and isr for partition %s to %s".format(topicAndPartition, leaderIsrAndControllerEpoch))
try {
ZkUtils.createPersistentPath(controllerContext.zkClient,
ZkUtils.getTopicPartitionLeaderAndIsrPath(topicAndPartition.topic, topicAndPartition.partition),
ZkUtils.leaderAndIsrZkData(leaderIsrAndControllerEpoch.leaderAndIsr, controller.epoch))
// NOTE: the above write can fail only if the current controller lost its zk session and the new controller
// took over and initialized this partition. This can happen if the current controller went into a long
// GC pause
controllerContext.partitionLeadershipInfo.put(topicAndPartition, leaderIsrAndControllerEpoch)
brokerRequestBatch.addLeaderAndIsrRequestForBrokers(liveAssignedReplicas, topicAndPartition.topic,
topicAndPartition.partition, leaderIsrAndControllerEpoch, replicaAssignment)
} catch {
case e: ZkNodeExistsException =>
// read the controller epoch
val leaderIsrAndEpoch = ZkUtils.getLeaderIsrAndEpochForPartition(zkClient, topicAndPartition.topic,
topicAndPartition.partition).get
val failMsg = ("encountered error while changing partition %s's state from New to Online since LeaderAndIsr path already " +
"exists with value %s and controller epoch %d")
.format(topicAndPartition, leaderIsrAndEpoch.leaderAndIsr.toString(), leaderIsrAndEpoch.controllerEpoch)
stateChangeLogger.error("Controller %d epoch %d ".format(controllerId, controller.epoch) + failMsg)
throw new StateChangeFailedException(failMsg)
}
}
}
/**
* Invoked on the OfflinePartition,OnlinePartition->OnlinePartition state change.
* It invokes the leader election API to elect a leader for the input offline partition
* @param topic The topic of the offline partition
* @param partition The offline partition
* @param leaderSelector Specific leader selector (e.g., offline/reassigned/etc.)
*/
def electLeaderForPartition(topic: String, partition: Int, leaderSelector: PartitionLeaderSelector) {
val topicAndPartition = TopicAndPartition(topic, partition)
// handle leader election for the partitions whose leader is no longer alive
stateChangeLogger.trace("Controller %d epoch %d started leader election for partition %s"
.format(controllerId, controller.epoch, topicAndPartition))
try {
var zookeeperPathUpdateSucceeded: Boolean = false
var newLeaderAndIsr: LeaderAndIsr = null
var replicasForThisPartition: Seq[Int] = Seq.empty[Int]
while(!zookeeperPathUpdateSucceeded) {
val currentLeaderIsrAndEpoch = getLeaderIsrAndEpochOrThrowException(topic, partition)
val currentLeaderAndIsr = currentLeaderIsrAndEpoch.leaderAndIsr
val controllerEpoch = currentLeaderIsrAndEpoch.controllerEpoch
if (controllerEpoch > controller.epoch) {
val failMsg = ("aborted leader election for partition [%s,%d] since the LeaderAndIsr path was " +
"already written by another controller. This probably means that the current controller %d went through " +
"a soft failure and another controller was elected with epoch %d.")
.format(topic, partition, controllerId, controllerEpoch)
stateChangeLogger.error("Controller %d epoch %d ".format(controllerId, controller.epoch) + failMsg)
throw new StateChangeFailedException(failMsg)
}
// elect new leader or throw exception
val (leaderAndIsr, replicas) = leaderSelector.selectLeader(topicAndPartition, currentLeaderAndIsr)
val (updateSucceeded, newVersion) = ZkUtils.conditionalUpdatePersistentPath(zkClient,
ZkUtils.getTopicPartitionLeaderAndIsrPath(topic, partition),
ZkUtils.leaderAndIsrZkData(leaderAndIsr, controller.epoch), currentLeaderAndIsr.zkVersion)
newLeaderAndIsr = leaderAndIsr
newLeaderAndIsr.zkVersion = newVersion
zookeeperPathUpdateSucceeded = updateSucceeded
replicasForThisPartition = replicas
}
val newLeaderIsrAndControllerEpoch = new LeaderIsrAndControllerEpoch(newLeaderAndIsr, controller.epoch)
// update the leader cache
controllerContext.partitionLeadershipInfo.put(TopicAndPartition(topic, partition), newLeaderIsrAndControllerEpoch)
stateChangeLogger.trace("Controller %d epoch %d elected leader %d for Offline partition %s"
.format(controllerId, controller.epoch, newLeaderAndIsr.leader, topicAndPartition))
val replicas = controllerContext.partitionReplicaAssignment(TopicAndPartition(topic, partition))
// store new leader and isr info in cache
brokerRequestBatch.addLeaderAndIsrRequestForBrokers(replicasForThisPartition, topic, partition,
newLeaderIsrAndControllerEpoch, replicas)
} catch {
case lenne: LeaderElectionNotNeededException => // swallow
case nroe: NoReplicaOnlineException => throw nroe
case sce: Throwable =>
val failMsg = "encountered error while electing leader for partition %s due to: %s.".format(topicAndPartition, sce.getMessage)
stateChangeLogger.error("Controller %d epoch %d ".format(controllerId, controller.epoch) + failMsg)
throw new StateChangeFailedException(failMsg, sce)
}
debug("After leader election, leader cache is updated to %s".format(controllerContext.partitionLeadershipInfo.map(l => (l._1, l._2))))
}
private def registerTopicChangeListener() = {
topicChangeListener = new TopicChangeListener()
zkClient.subscribeChildChanges(ZkUtils.BrokerTopicsPath, topicChangeListener)
}
def registerPartitionChangeListener(topic: String) = {
addPartitionsListener.put(topic, new AddPartitionsListener(topic))
zkClient.subscribeDataChanges(ZkUtils.getTopicPath(topic), addPartitionsListener(topic))
}
def deregisterPartitionChangeListener(topic: String) = {
zkClient.unsubscribeDataChanges(ZkUtils.getTopicPath(topic), addPartitionsListener(topic))
}
private def registerDeleteTopicListener() = {
deleteTopicsListener = new DeleteTopicsListener()
zkClient.subscribeChildChanges(ZkUtils.DeleteTopicsPath, deleteTopicsListener)
}
private def getLeaderIsrAndEpochOrThrowException(topic: String, partition: Int): LeaderIsrAndControllerEpoch = {
val topicAndPartition = TopicAndPartition(topic, partition)
ZkUtils.getLeaderIsrAndEpochForPartition(zkClient, topic, partition) match {
case Some(currentLeaderIsrAndEpoch) => currentLeaderIsrAndEpoch
case None =>
val failMsg = "LeaderAndIsr information doesn't exist for partition %s in %s state"
.format(topicAndPartition, partitionState(topicAndPartition))
throw new StateChangeFailedException(failMsg)
}
}
/**
* This is the zookeeper listener that triggers all the state transitions for a partition
*/
class TopicChangeListener extends IZkChildListener with Logging {
this.logIdent = "[TopicChangeListener on Controller " + controller.config.brokerId + "]: "
@throws(classOf[Exception])
def handleChildChange(parentPath : String, children : java.util.List[String]) {
inLock(controllerContext.controllerLock) {
if (hasStarted.get) {
try {
val currentChildren = {
import JavaConversions._
debug("Topic change listener fired for path %s with children %s".format(parentPath, children.mkString(",")))
(children: Buffer[String]).toSet
}
val newTopics = currentChildren -- controllerContext.allTopics
val deletedTopics = controllerContext.allTopics -- currentChildren
controllerContext.allTopics = currentChildren
val addedPartitionReplicaAssignment = ZkUtils.getReplicaAssignmentForTopics(zkClient, newTopics.toSeq)
controllerContext.partitionReplicaAssignment = controllerContext.partitionReplicaAssignment.filter(p =>
!deletedTopics.contains(p._1.topic))
controllerContext.partitionReplicaAssignment.++=(addedPartitionReplicaAssignment)
info("New topics: [%s], deleted topics: [%s], new partition replica assignment [%s]".format(newTopics,
deletedTopics, addedPartitionReplicaAssignment))
if(newTopics.size > 0)
controller.onNewTopicCreation(newTopics, addedPartitionReplicaAssignment.keySet.toSet)
} catch {
case e: Throwable => error("Error while handling new topic", e )
}
}
}
}
}
/**
* Delete topics includes the following operations -
* 1. Add the topic to be deleted to the delete topics cache, only if the topic exists
* 2. If there are topics to be deleted, it signals the delete topic thread
*/
class DeleteTopicsListener() extends IZkChildListener with Logging {
this.logIdent = "[DeleteTopicsListener on " + controller.config.brokerId + "]: "
val zkClient = controllerContext.zkClient
/**
* Invoked when a topic is being deleted
* @throws Exception On any error.
*/
@throws(classOf[Exception])
def handleChildChange(parentPath : String, children : java.util.List[String]) {
inLock(controllerContext.controllerLock) {
var topicsToBeDeleted = {
import JavaConversions._
(children: Buffer[String]).toSet
}
debug("Delete topics listener fired for topics %s to be deleted".format(topicsToBeDeleted.mkString(",")))
val nonExistentTopics = topicsToBeDeleted.filter(t => !controllerContext.allTopics.contains(t))
if(nonExistentTopics.size > 0) {
warn("Ignoring request to delete non-existing topics " + nonExistentTopics.mkString(","))
nonExistentTopics.foreach(topic => ZkUtils.deletePathRecursive(zkClient, ZkUtils.getDeleteTopicPath(topic)))
}
topicsToBeDeleted --= nonExistentTopics
if(topicsToBeDeleted.size > 0) {
info("Starting topic deletion for topics " + topicsToBeDeleted.mkString(","))
// add topic to deletion list
controller.deleteTopicManager.enqueueTopicsForDeletion(topicsToBeDeleted)
// mark topic ineligible for deletion if other state changes are in progress
topicsToBeDeleted.foreach { topic =>
val preferredReplicaElectionInProgress =
controllerContext.partitionsUndergoingPreferredReplicaElection.map(_.topic).contains(topic)
val partitionReassignmentInProgress =
controllerContext.partitionsBeingReassigned.keySet.map(_.topic).contains(topic)
if(preferredReplicaElectionInProgress || partitionReassignmentInProgress)
controller.deleteTopicManager.markTopicIneligibleForDeletion(Set(topic))
}
}
}
}
/**
*
* @throws Exception
* On any error.
*/
@throws(classOf[Exception])
def handleDataDeleted(dataPath: String) {
}
}
class AddPartitionsListener(topic: String) extends IZkDataListener with Logging {
this.logIdent = "[AddPartitionsListener on " + controller.config.brokerId + "]: "
@throws(classOf[Exception])
def handleDataChange(dataPath : String, data: Object) {
inLock(controllerContext.controllerLock) {
try {
info("Add Partition triggered " + data.toString + " for path " + dataPath)
val partitionReplicaAssignment = ZkUtils.getReplicaAssignmentForTopics(zkClient, List(topic))
val partitionsToBeAdded = partitionReplicaAssignment.filter(p =>
!controllerContext.partitionReplicaAssignment.contains(p._1))
if(controller.deleteTopicManager.isTopicQueuedUpForDeletion(topic))
error("Skipping adding partitions %s for topic %s since it is currently being deleted"
.format(partitionsToBeAdded.map(_._1.partition).mkString(","), topic))
else {
if (partitionsToBeAdded.size > 0) {
info("New partitions to be added %s".format(partitionsToBeAdded))
controller.onNewPartitionCreation(partitionsToBeAdded.keySet.toSet)
}
}
} catch {
case e: Throwable => error("Error while handling add partitions for data path " + dataPath, e )
}
}
}
@throws(classOf[Exception])
def handleDataDeleted(parentPath : String) {
// this is not implemented for partition change
}
}
}
sealed trait PartitionState { def state: Byte }
case object NewPartition extends PartitionState { val state: Byte = 0 }
case object OnlinePartition extends PartitionState { val state: Byte = 1 }
case object OfflinePartition extends PartitionState { val state: Byte = 2 }
case object NonExistentPartition extends PartitionState { val state: Byte = 3 }
|
unix1986/universe
|
tool/kafka-0.8.1.1-src/core/src/main/scala/kafka/controller/PartitionStateMachine.scala
|
Scala
|
bsd-2-clause
| 29,647 |
package com.nutomic.ensichat.integration
import java.io.File
import java.util.concurrent.{CountDownLatch, TimeUnit}
import java.util.{Timer, TimerTask}
import com.nutomic.ensichat.core.messages.body.Text
import com.nutomic.ensichat.core.util.Crypto
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
import scala.util.Try
import scalax.file.Path
/**
* Creates some local nodes, connects them and sends messages between them.
*
* If the test runs slow or fails, changing [[Crypto.PublicKeySize]] to 512 should help.
*
* These tests are somewhat fragile, and might fail randomly. It helps to run only one of
* the test functions at a time.
*/
object Main extends App {
testNeighborSending()
testMeshMessageSending()
testIndirectRelay()
testNeighborRelay()
testMessageDeliveryOnConnect()
testSendDelayed()
testRouteChange()
testMessageConfirmation()
testKeyRequest()
private def testNeighborSending(): Unit = {
val node1 = Await.result(createNode(1), Duration.Inf)
val node2 = Await.result(createNode(2), Duration.Inf)
connectNodes(node1, node2)
sendMessage(node1, node2)
Set(node1, node2).foreach(_.stop())
System.out.println("Test neighbor sending successful!")
}
private def testNeighborRelay(): Unit = {
val nodes = createNodes(3)
connectNodes(nodes(0), nodes(1))
val timer = new Timer()
timer.schedule(new TimerTask {
override def run(): Unit = {
nodes(0).stop()
connectNodes(nodes(1), nodes(2))
}
}, Duration(10, TimeUnit.SECONDS).toMillis)
sendMessage(nodes(0), nodes(2), 30)
timer.cancel()
nodes.foreach(_.stop())
System.out.println("Test neighbor relay successful!")
}
private def testIndirectRelay(): Unit = {
val nodes = createNodes(5)
connectNodes(nodes(0), nodes(1))
connectNodes(nodes(1), nodes(2))
connectNodes(nodes(2), nodes(3))
val timer = new Timer()
timer.schedule(new TimerTask {
override def run(): Unit = {
nodes(0).stop()
connectNodes(nodes(3), nodes(4))
}
}, Duration(10, TimeUnit.SECONDS).toMillis)
sendMessage(nodes(0), nodes(4), 30)
timer.cancel()
nodes.foreach(_.stop())
System.out.println("Test indirect sending successful!")
}
private def testMeshMessageSending(): Unit = {
val nodes = createMesh()
sendMessages(nodes)
nodes.foreach(_.stop())
System.out.println("Test mesh message sending successful!")
}
/**
* Stop node 1, forcing route errors and messages to use the (longer) path via nodes 7 and 8.
*/
private def testRouteChange() {
val nodes = createMesh()
sendMessages(nodes)
nodes(1).stop()
Thread.sleep(10 * 1000)
sendMessages(nodes)
nodes.foreach(_.stop())
System.out.println("Test route change successful!")
}
/**
* Create new node 9, send message from node 0 to its address, before actually connecting it.
* The message is automatically delivered when node 9 connects as neighbor.
*/
private def testMessageDeliveryOnConnect() {
val nodes = createMesh()
val node9 = Await.result(createNode(9), Duration.Inf)
val timer = new Timer()
timer.schedule(new TimerTask {
override def run(): Unit = {
connectNodes(nodes(0), node9)
timer.cancel()
}
}, Duration(10, TimeUnit.SECONDS).toMillis)
sendMessage(nodes(0), node9, 30)
(nodes :+ node9).foreach(_.stop())
System.out.println("Test message delivery on connect successful!")
}
/**
* Create new node 10, send message from node 7 to its address, before connecting it to the mesh.
* The message is delivered after node 7 starts a route discovery triggered by the message buffer.
*/
private def testSendDelayed(): Unit = {
val nodes = createMesh()
val timer = new Timer()
val node10 = Await.result(createNode(10), Duration.Inf)
timer.schedule(new TimerTask {
override def run(): Unit = {
connectNodes(nodes(0), node10)
timer.cancel()
}
}, Duration(5, TimeUnit.SECONDS).toMillis)
sendMessage(nodes(7), node10, 30)
(nodes :+ node10).foreach(_.stop())
System.out.println("Test send delayed successful!")
}
/**
* Check that message confirmation is sent back after message was received.
*/
private def testMessageConfirmation(): Unit = {
val nodes = createNodes(2)
connectNodes(nodes(0), nodes(1))
sendMessage(nodes(0), nodes(1))
assert(nodes(0).database.getMessages(nodes(1).crypto.localAddress).nonEmpty)
assert(nodes(0).database.getUnconfirmedMessages.isEmpty)
nodes.foreach(_.stop())
}
private def testKeyRequest(): Unit = {
val nodes = createNodes(4)
connectNodes(nodes(0), nodes(1))
connectNodes(nodes(1), nodes(2))
connectNodes(nodes(2), nodes(3))
val origin = nodes(0)
val target = nodes(3)
System.out.println(s"sendMessage(${origin.index}, ${target.index})")
val text = s"${origin.index} to ${target.index}"
origin.connectionHandler.sendTo(target.crypto.localAddress, new Text(text))
val latch = new CountDownLatch(1)
Future {
val exists =
target.eventQueue.toStream.exists { event =>
if (event._1 != LocalNode.EventType.MessageReceived)
false
else {
event._2.get.body match {
case t: Text => t.text == text
case _ => false
}
}
}
assert(exists, s"message from ${origin.index} did not arrive at ${target.index}")
latch.countDown()
}
assert(latch.await(3, TimeUnit.SECONDS))
}
private def createNodes(count: Int): Seq[LocalNode] = {
val nodes = Await.result(Future.sequence((0 until count).map(createNode)), Duration.Inf)
nodes.foreach(n => System.out.println(s"Node ${n.index} has address ${n.crypto.localAddress}"))
nodes
}
/**
* Creates a new mesh with a predefined layout.
*
* Graphical representation:
*   8 —— 7
*  /      \
* 0———1———3———4
*  \ /    |   |
*   2     5———6
*
* @return List of [[LocalNode]]s, ordered from 0 to 8.
*/
private def createMesh(): Seq[LocalNode] = {
val nodes = createNodes(9)
connectNodes(nodes(0), nodes(1))
connectNodes(nodes(0), nodes(2))
connectNodes(nodes(1), nodes(2))
connectNodes(nodes(1), nodes(3))
connectNodes(nodes(3), nodes(4))
connectNodes(nodes(3), nodes(5))
connectNodes(nodes(4), nodes(6))
connectNodes(nodes(5), nodes(6))
connectNodes(nodes(3), nodes(7))
connectNodes(nodes(0), nodes(8))
connectNodes(nodes(7), nodes(8))
nodes
}
private def createNode(index: Int): Future[LocalNode] = {
val configFolder = new File(s"build/node$index/")
Path(configFolder).deleteRecursively()
Future(new LocalNode(index, configFolder))
}
private def connectNodes(first: LocalNode, second: LocalNode): Unit = {
first.connectionHandler.connect(s"localhost:${second.port}")
first.eventQueue.toStream.find(_._1 == LocalNode.EventType.ConnectionsChanged)
second.eventQueue.toStream.find(_._1 == LocalNode.EventType.ConnectionsChanged)
val firstAddress = first.crypto.localAddress
val secondAddress = second.crypto.localAddress
val firstConnections = first.connectionHandler.connections()
val secondConnections = second.connectionHandler.connections()
assert(firstConnections.contains(secondAddress),
s"${first.index} is not connected to ${second.index}")
assert(secondConnections.contains(firstAddress),
s"${second.index} is not connected to ${second.index}")
System.out.println(s"${first.index} and ${second.index} connected")
}
private def sendMessages(nodes: Seq[LocalNode]): Unit = {
sendMessage(nodes(2), nodes(0))
sendMessage(nodes(0), nodes(2))
sendMessage(nodes(4), nodes(3))
sendMessage(nodes(3), nodes(5))
sendMessage(nodes(4), nodes(6))
sendMessage(nodes(2), nodes(3))
sendMessage(nodes(3), nodes(6))
sendMessage(nodes(3), nodes(2))
}
private def sendMessage(from: LocalNode, to: LocalNode, waitSeconds: Int = 1): Unit = {
addKey(to.crypto, from.crypto)
addKey(from.crypto, to.crypto)
System.out.println(s"sendMessage(${from.index}, ${to.index})")
val text = s"${from.index} to ${to.index}"
from.connectionHandler.sendTo(to.crypto.localAddress, new Text(text))
val latch = new CountDownLatch(1)
Future {
val exists =
to.eventQueue.toStream.exists { event =>
if (event._1 != LocalNode.EventType.MessageReceived)
false
else {
event._2.get.body match {
case t: Text => t.text == text
case _ => false
}
}
}
assert(exists, s"message from ${from.index} did not arrive at ${to.index}")
latch.countDown()
}
assert(latch.await(waitSeconds, TimeUnit.SECONDS))
}
private def addKey(addTo: Crypto, addFrom: Crypto): Unit = {
if (Try(addTo.getPublicKey(addFrom.localAddress)).isFailure)
addTo.addPublicKey(addFrom.localAddress, addFrom.getLocalPublicKey)
}
}
|
Nutomic/ensichat
|
integration/src/main/scala/com.nutomic.ensichat.integration/Main.scala
|
Scala
|
mpl-2.0
| 9,306 |
package gs.nick
import scala.concurrent.Future
trait GamesDaoTrait {
def getAllGames: Future[Seq[DbGame]]
def getAllBySystem(systemId: Int): Future[Seq[DbGame]]
def getGame(id: Int): Future[Option[DbGame]]
def addGame(game: DbGame): Future[Int]
}
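// A hedged, illustrative sketch of an in-memory implementation of this trait,
// e.g. for tests. The DbGame fields referenced here (id, systemId) are
// assumptions about that case class and are not defined in this file:
//
//   class InMemoryGamesDao(initial: Seq[DbGame]) extends GamesDaoTrait {
//     private var games = initial.toVector
//     def getAllGames: Future[Seq[DbGame]] = Future.successful(games)
//     def getAllBySystem(systemId: Int): Future[Seq[DbGame]] =
//       Future.successful(games.filter(_.systemId == systemId))
//     def getGame(id: Int): Future[Option[DbGame]] =
//       Future.successful(games.find(_.id == id))
//     def addGame(game: DbGame): Future[Int] = {
//       games = games :+ game
//       Future.successful(games.length)
//     }
//   }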
|
nickfun/api-games
|
src/main/scala/gs/nick/GamesDaoTrait.scala
|
Scala
|
gpl-3.0
| 260 |
package net.sf.latexdraw.parsers.pst.parser
import java.awt.Color
import net.sf.latexdraw.glib.models.interfaces.IShape
import net.sf.latexdraw.glib.views.latex.DviPsColors
/**
* A parser grouping parsers parsing text commands.<br>
*<br>
* This file is part of LaTeXDraw<br>
* Copyright (c) 2005-2013 Arnaud BLOUIN<br>
*<br>
* LaTeXDraw is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.<br>
*<br>
* LaTeXDraw is distributed without any warranty; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.<br>
*<br>
* 2012-10-14<br>
* @author Arnaud BLOUIN
* @version 3.0
*/
trait TextCommandsParser extends PSTAbstractParser with PSTBracketBlockParser with IPSTCodeParser {
/**
* Parses commands handling texts.
*/
def parsetextCommands(ctx:PSTContext) : Parser[List[IShape]] =
parseUseFontCommand(ctx) | parseColorCommand(ctx) | parseTextcolorCommand(ctx) | parseTextSizeCommand(ctx) |
parsetextCommandWithBlock(ctx) | parsetextCommandWithNoBlock(ctx) | parseFamilyFontCommand(ctx) |
parseSerieFontCommand(ctx) | parseShapeFontCommand(ctx)
  /** Parses the font shapes. */
private def parseShapeFontCommand(ctx:PSTContext):Parser[List[IShape]] =
("\\\\upshape" | "\\\\itshape" | "\\\\slshape" | "\\\\scshape" | "\\\\it" | "\\\\sc" | "\\\\sl") ^^ {case cmd =>
ctx.textParsed +=cmd
cmd match {
case "\\\\upshape" => ctx.currFontShape = fontShape.normal
case "\\\\itshape" => ctx.currFontShape = fontShape.italic
case "\\\\it" => ctx.currFontShape = fontShape.italic
case "\\\\slshape" => ctx.currFontShape = fontShape.slanted
case "\\\\sl" => ctx.currFontShape = fontShape.slanted
case "\\\\scshape" => ctx.currFontShape = fontShape.smallCaps
case "\\\\sc" => ctx.currFontShape = fontShape.smallCaps
case _ =>
}
Nil
}
  /** Parses the font series. */
private def parseSerieFontCommand(ctx:PSTContext):Parser[List[IShape]] = ("\\\\mdseries" | "\\\\bfseries" | "\\\\bf") ^^ {case cmd =>
ctx.textParsed +=cmd
cmd match {
case "\\\\mdseries" => ctx.currFontSerie = fontSerie.normal
case "\\\\bfseries" => ctx.currFontSerie = fontSerie.bf
case "\\\\bf" => ctx.currFontSerie = fontSerie.bf
case _ =>
}
Nil
}
/** Parses the font families. */
private def parseFamilyFontCommand(ctx:PSTContext):Parser[List[IShape]] = ("\\\\rmfamily" | "\\\\sffamily" | "\\\\ttfamily") ^^ {case cmd =>
ctx.textParsed +=cmd
cmd match {
case "\\\\rmfamily" => ctx.currFontFamily = fontFamily.rm
case "\\\\sffamily" => ctx.currFontFamily = fontFamily.sf
case "\\\\ttfamily" => ctx.currFontFamily = fontFamily.tt
case _ =>
}
Nil
}
/** Parses the accent commands having no bracket block. */
private def parsetextCommandWithNoBlock(ctx:PSTContext):Parser[List[IShape]] = ("\\\\l") ^^ {case cmd =>
ctx.textParsed +=cmd
ctx.parsedTxtNoTxt = false
Nil
}
  /** Parses the accent commands that may have a bracket block. */
private def parsetextCommandWithBlock(ctx:PSTContext):Parser[List[IShape]] =
("\\\\`" | "\\\\'" | "\\\\^" | "\\\\\\"" | "\\\\H" | "\\\\~" | "\\\\c" | "\\\\k" | "\\\\=" | "\\\\b" | "\\\\." | "\\\\d" |
"\\\\r" | "\\\\u" | "\\\\v" | "\\\\t" | "\\\\textsf" | "\\\\textsc" | "\\\\textsl" | "\\\\underline" |
"\\\\texttt" | "\\\\emph" | "\\\\textbf" | "\\\\textit") ~ opt(parseBracket(ctx)) ^^ {
case cmd ~ txt =>
ctx.textParsed +=cmd
if(txt.isDefined) ctx.textParsed +="{"+txt.get+"}"
ctx.parsedTxtNoTxt = false
Nil
}
/** Parses the text size commands. */
private def parseTextSizeCommand(ctx:PSTContext):Parser[List[IShape]] =
("\\\\tiny" | "\\\\scriptsize" | "\\\\footnotesize" | "\\\\small" | "\\\\normalsize" | "\\\\large" | "\\\\Large" | "\\\\huge" | "\\\\Huge") ^^ {
case cmd =>
ctx.textParsed +=cmd
ctx.parsedTxtNoTxt = false
Nil
}
/** Parses the command \\textcolor */
private def parseTextcolorCommand(ctx:PSTContext) : Parser[List[IShape]] = {
val newCtx = new PSTContext(ctx)
"\\\\textcolor" ~ parseColorBlock(newCtx) ~ parsePSTBlock(newCtx, newCtx.isPsCustom) ^^ {
case _ ~ _ ~ shapes => List(shapes)
}
}
/** Parses the colour contained in the block. */
private def parseColorBlock(ctx:PSTContext) : Parser[Unit] = {
parseBracket(ctx) ^^ {
case colourTxt =>
DviPsColors.INSTANCE.getColour(colourTxt) match {
case c:Color => ctx.textColor = c
case _ =>
}
}
}
/** Parses the command \\color */
private def parseColorCommand(ctx:PSTContext) : Parser[List[IShape]] = "\\\\color" ~ parseColorBlock(ctx) ^^ {
case _ ~ _ => Nil
}
/** Parses the usefont command. */
private def parseUseFontCommand(ctx:PSTContext) : Parser[List[IShape]] =
"\\\\usefont" ~ parseBracket(ctx) ~ parseBracket(ctx) ~ parseBracket(ctx) ~ parseBracket(ctx) ^^ {
case _ ~ encoding ~ family ~ series ~ shapes =>
ctx.textParsed += "\\\\usefont{"+encoding+"}{"+family+"}{"+series+"}{"+shapes+"}"
fontShape.toFontShape(shapes) match {
case Some(value) => ctx.currFontShape = value
case _ =>
}
fontFamily.toFontFamily(family) match {
case Some(value) => ctx.currFontFamily = value
case _ =>
}
fontSerie.toFontSerie(series) match {
case Some(value) => ctx.currFontSerie = value
case _ =>
}
Nil
}
}
|
arnobl/latexdraw-mutants
|
GUImutants/original/net.sf.latexdraw/src/main/net/sf/latexdraw/parsers/pst/parser/TextCommandsParser.scala
|
Scala
|
gpl-2.0
| 5,373 |
package io.travisbrown.abstracted.internal
import scala.reflect.ClassTag
private[abstracted] trait MacrosCompat {
type Context = scala.reflect.macros.Context
def resultType(c: Context)(tpe: c.Type)(implicit
/**
* See SI-5143 for discussion of why we need this class tag.
*/
tag: ClassTag[c.universe.MethodType]
): c.Type = {
import c.universe.MethodType
tpe match {
case MethodType(_, res) => resultType(c)(res)
case other => other
}
}
def implicitViews(c: Context)(source: c.Type)(implicit
/**
* See SI-5143 for discussion of why we need this class tag.
*/
tag: ClassTag[c.universe.TypeRef]
): List[c.Tree] = {
import c.universe.{ EmptyTree, TypeRef }
c.enclosingImplicits.collect {
case (TypeRef(_, _, _ :: target :: _), _) =>
c.inferImplicitView(EmptyTree, source, target)
}.filterNot(_.isEmpty)
}
}
|
travisbrown/abstracted
|
core/src/main/scala-2.10/io/travisbrown/abstracted/internal/MacrosCompat.scala
|
Scala
|
apache-2.0
| 909 |
package chat.tox.antox.activities
import java.io.File
import android.content.Intent
import android.graphics.PorterDuff
import android.os.{Build, Bundle}
import android.support.v7.app.AppCompatActivity
import android.text.{Editable, TextWatcher}
import android.view.View
import android.widget.{EditText, TextView}
import chat.tox.antox.R
import chat.tox.antox.data.State
import chat.tox.antox.utils.BitmapManager
import chat.tox.antox.wrapper.ToxKey
import com.shamanland.fab.FloatingActionButton
import de.hdodenhof.circleimageview.CircleImageView
class FriendProfileActivity extends AppCompatActivity {
var friendKey: ToxKey = null
var nickChanged: Boolean = false
override def onCreate(savedInstanceState: Bundle) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_friend_profile)
if (Build.VERSION.SDK_INT < Build.VERSION_CODES.HONEYCOMB) {
getSupportActionBar.setIcon(R.drawable.ic_actionbar)
}
friendKey = new ToxKey(getIntent.getStringExtra("key"))
val db = State.db
val friendNote = db.getContactStatusMessage(friendKey)
setTitle(getResources.getString(R.string.friend_profile_title, getIntent.getStringExtra("name")))
val editFriendAlias = findViewById(R.id.friendAlias).asInstanceOf[EditText]
editFriendAlias.setText(getIntent.getStringExtra("name"))
editFriendAlias.addTextChangedListener(new TextWatcher() {
override def afterTextChanged(s: Editable) {
/* Set nick changed to true in order to save change in onPause() */
nickChanged = true
/* Update title to reflect new nick */
setTitle(getResources.getString(R.string.friend_profile_title, editFriendAlias.getText.toString))
}
override def beforeTextChanged(s: CharSequence, start: Int, count: Int, after: Int) {}
override def onTextChanged(s: CharSequence, start: Int, before: Int, count: Int) {}
})
// Set cursor to end of edit text field
editFriendAlias.setSelection(editFriendAlias.length(), editFriendAlias.length())
val editFriendNote = findViewById(R.id.friendNoteText).asInstanceOf[TextView]
editFriendNote.setText("\\"" + friendNote + "\\"")
val avatar = getIntent.getSerializableExtra("avatar").asInstanceOf[Option[File]]
avatar.foreach(avatar => {
val avatarHolder = findViewById(R.id.avatar).asInstanceOf[CircleImageView]
BitmapManager.load(avatar, avatarHolder, isAvatar = true)
})
updateFab(db.getFriendInfo(friendKey).favorite)
}
override def onBackPressed() {
super.onBackPressed()
val intent = new Intent(FriendProfileActivity.this, classOf[MainActivity])
intent.addCategory(Intent.CATEGORY_HOME)
intent.setFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP)
FriendProfileActivity.this.startActivity(intent)
finish()
}
/**
* Override onPause() in order to save any nickname changes
*/
override def onPause() {
super.onPause()
/* Update friend alias after text has been changed */
if (nickChanged) {
val editFriendAlias = findViewById(R.id.friendAlias).asInstanceOf[EditText]
State.db.updateAlias(editFriendAlias.getText.toString, friendKey)
}
}
def onClickFavorite(view: View): Unit = {
val db = State.db
val favorite = !db.getFriendInfo(friendKey).favorite
db.updateContactFavorite(friendKey, favorite)
updateFab(favorite)
}
def updateFab(favorite: Boolean): Unit = {
val fab = findViewById(R.id.favorite_button).asInstanceOf[FloatingActionButton]
fab.setSize(FloatingActionButton.SIZE_NORMAL)
fab.setColor(getResources.getColor(if (favorite) R.color.material_red_a700 else R.color.white))
if (favorite) {
val drawable = getResources.getDrawable(R.drawable.ic_star_black_24dp)
drawable.setColorFilter(R.color.brand_primary, PorterDuff.Mode.MULTIPLY)
fab.setImageDrawable(drawable)
} else {
fab.setImageDrawable(
getResources.getDrawable(R.drawable.ic_star_outline_black_24dp))
}
fab.initBackground()
}
}
|
gale320/Antox
|
app/src/main/scala/chat/tox/antox/activities/FriendProfileActivity.scala
|
Scala
|
gpl-3.0
| 4,029 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.response
import java.nio.charset.Charset
import java.security.MessageDigest
import scala.math.max
import io.gatling.commons.util.Collections._
import io.gatling.commons.util.StringHelper.bytes2Hex
import io.gatling.commons.util.ClockSingleton.nowMillis
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.stats.message.ResponseTimings
import io.gatling.http.HeaderNames
import io.gatling.http.check.HttpCheck
import io.gatling.http.check.checksum.ChecksumCheck
import io.gatling.http.util.HttpHelper.{ extractCharsetFromContentType, isCss, isHtml, isTxt }
import com.typesafe.scalalogging.StrictLogging
import io.netty.buffer.ByteBuf
import io.netty.handler.codec.http.{ HttpHeaders, DefaultHttpHeaders }
import org.asynchttpclient._
import org.asynchttpclient.netty.request.NettyRequest
import org.asynchttpclient.netty.LazyResponseBodyPart
object ResponseBuilder extends StrictLogging {
val EmptyHeaders = new DefaultHttpHeaders
val Identity: Response => Response = identity[Response]
private val IsDebugEnabled = logger.underlying.isDebugEnabled
def newResponseBuilderFactory(
checks: List[HttpCheck],
responseTransformer: Option[PartialFunction[Response, Response]],
discardResponseChunks: Boolean,
inferHtmlResources: Boolean,
configuration: GatlingConfiguration
): ResponseBuilderFactory = {
val checksumChecks = checks.collect {
case checksumCheck: ChecksumCheck => checksumCheck
}
val responseBodyUsageStrategies = checks.flatMap(_.responseBodyUsageStrategy).toSet
val storeBodyParts = IsDebugEnabled || !discardResponseChunks || responseBodyUsageStrategies.nonEmpty || responseTransformer.isDefined
request => new ResponseBuilder(
request,
checksumChecks,
responseBodyUsageStrategies,
responseTransformer,
storeBodyParts,
inferHtmlResources,
configuration.core.charset
)
}
}
class ResponseBuilder(
request: Request,
checksumChecks: List[ChecksumCheck],
bodyUsageStrategies: Set[ResponseBodyUsageStrategy],
responseTransformer: Option[PartialFunction[Response, Response]],
storeBodyParts: Boolean,
inferHtmlResources: Boolean,
defaultCharset: Charset
) {
val computeChecksums = checksumChecks.nonEmpty
@volatile var storeHtmlOrCss: Boolean = _
@volatile var startTimestamp: Long = _
@volatile var endTimestamp: Long = _
@volatile private var _reset: Boolean = _
@volatile private var status: Option[HttpResponseStatus] = None
@volatile private var headers: HttpHeaders = ResponseBuilder.EmptyHeaders
@volatile private var chunks: List[ByteBuf] = Nil
@volatile private var digests: Map[String, MessageDigest] = initDigests()
@volatile private var nettyRequest: Option[NettyRequest] = None
def initDigests(): Map[String, MessageDigest] =
if (computeChecksums)
checksumChecks.foldLeft(Map.empty[String, MessageDigest]) { (map, check) =>
map + (check.algorithm -> MessageDigest.getInstance(check.algorithm))
}
else
Map.empty[String, MessageDigest]
def updateStartTimestamp(): Unit =
startTimestamp = nowMillis
def updateEndTimestamp(): Unit =
endTimestamp = nowMillis
def setNettyRequest(nettyRequest: NettyRequest) =
this.nettyRequest = Some(nettyRequest)
def markReset(): Unit =
_reset = true
def doReset(): Unit =
if (_reset) {
_reset = false
endTimestamp = 0L
status = None
headers = ResponseBuilder.EmptyHeaders
resetChunks()
digests = initDigests()
}
private def resetChunks(): Unit = {
chunks.foreach(_.release())
chunks = Nil
}
def accumulate(status: HttpResponseStatus): Unit = {
this.status = Some(status)
updateEndTimestamp()
}
def accumulate(headers: HttpHeaders): Unit = {
this.headers = headers
storeHtmlOrCss = inferHtmlResources && (isHtml(headers) || isCss(headers))
}
def accumulate(bodyPart: HttpResponseBodyPart): Unit = {
updateEndTimestamp()
val byteBuf = bodyPart.asInstanceOf[LazyResponseBodyPart].getBuf
if (byteBuf.readableBytes > 0) {
if (storeBodyParts || storeHtmlOrCss) {
chunks = byteBuf.retain() :: chunks // beware, we have to retain!
}
if (computeChecksums)
for {
nioBuffer <- byteBuf.nioBuffers
digest <- digests.values
} digest.update(nioBuffer.duplicate)
}
}
def resolvedCharset = Option(headers.get(HeaderNames.ContentType))
.flatMap(extractCharsetFromContentType)
.getOrElse(defaultCharset)
def build: Response = {
// time measurement is imprecise due to multi-core nature
// moreover, ProgressListener might be called AFTER ChannelHandler methods
// ensure response doesn't end before starting
endTimestamp = max(endTimestamp, startTimestamp)
val checksums = digests.foldLeft(Map.empty[String, String]) { (map, entry) =>
val (algo, md) = entry
map + (algo -> bytes2Hex(md.digest))
}
val bodyLength = chunks.sumBy(_.readableBytes)
val bodyUsages = bodyUsageStrategies.map(_.bodyUsage(bodyLength))
val resolvedCharset = Option(headers.get(HeaderNames.ContentType))
.flatMap(extractCharsetFromContentType)
.getOrElse(defaultCharset)
val properlyOrderedChunks = chunks.reverse
val body: ResponseBody =
if (properlyOrderedChunks.isEmpty)
NoResponseBody
else if (bodyUsages.contains(ByteArrayResponseBodyUsage))
ByteArrayResponseBody(properlyOrderedChunks, resolvedCharset)
else if (bodyUsages.contains(InputStreamResponseBodyUsage) || bodyUsages.isEmpty)
InputStreamResponseBody(properlyOrderedChunks, resolvedCharset)
else if (isTxt(headers))
StringResponseBody(properlyOrderedChunks, resolvedCharset)
else
ByteArrayResponseBody(properlyOrderedChunks, resolvedCharset)
resetChunks()
val rawResponse = HttpResponse(request, nettyRequest, status, headers, body, checksums, bodyLength, resolvedCharset, ResponseTimings(startTimestamp, endTimestamp))
responseTransformer match {
case None => rawResponse
case Some(transformer) => transformer.applyOrElse(rawResponse, ResponseBuilder.Identity)
}
}
def buildSafeResponse: Response =
HttpResponse(request, nettyRequest, status, headers, NoResponseBody, Map.empty, 0, resolvedCharset, ResponseTimings(startTimestamp, endTimestamp))
}
|
MykolaB/gatling
|
gatling-http/src/main/scala/io/gatling/http/response/ResponseBuilder.scala
|
Scala
|
apache-2.0
| 7,156 |
package skarn
package routing
/**
* Created by yusuke on 2015/03/04.
*/
import akka.actor.ActorContext
import skarn.push.PushServiceInfo
import spray.routing._
import Directives._
import spray.http.MediaTypes._
class IndexRoute(context: ActorContext) extends BasicRoute {
implicit val system = context.system
val resource = ""
val route = (pathSingleSlash | path(version)) {
get {
complete {
<html>
<body>
<h1>Skarn</h1>
<p>Push notification server build on Akka Actor with Scala</p>
<h2>Services</h2>
<ul>
{PushServiceInfo.services.map(s => <li>{s.name}</li>)}
</ul>
</body>
</html>
}
}
}
}
|
trifort/skarn
|
src/main/scala/skarn/routing/IndexRoute.scala
|
Scala
|
mit
| 736 |
package com.jaroop.anorm.debug
import anorm._
trait ParserDebugger {
val dash = "-" * 100
val star = "*" * 100
}
object RowParserDebugger extends ParserDebugger {
def apply[A](parser: RowParser[A], name: String): RowParser[A] = new RowParser[A] {
def apply(v1: Row): SqlResult[A] = {
val result = parser(v1)
println(s"$name : $result")
result
}
}
def group[A](parser: RowParser[A], name: String): RowParser[A] = new RowParser[A] {
def apply(v1: Row): SqlResult[A] = {
println(star)
println(dash)
println(s"RowParser: $name")
println(dash)
val result = parser(v1)
println(star)
result
}
private val listSize: List[A] => Int = _.size
private val singleSize: A => Int = _ => 1
private val optSize: Option[A] => Int = a => if(a.isDefined) 1 else 0
override def * : ResultSetParser[List[A]] = ResultSetParserDebugger(super.*, s"${name}.*", listSize)
override def + : ResultSetParser[List[A]] = ResultSetParserDebugger(super.+, s"${name}.+", listSize)
override def single: ResultSetParser[A] = ResultSetParserDebugger(super.single, s"${name}.single", singleSize)
override def singleOpt: ResultSetParser[Option[A]] = ResultSetParserDebugger(super.singleOpt, s"${name}.singleOpt", optSize)
}
}
object ResultSetParserDebugger extends ParserDebugger {
def apply[A](parser: ResultSetParser[A], name: String, count: A => Int): ResultSetParser[A] = new ResultSetParser[A] {
def apply(v1: SqlParser.ResultSet): SqlResult[A] = {
val result = parser(v1)
val parsed = result match {
case Success(a) => count(a)
case Error(_) => 0
}
println(star)
println(s"$name rows returned: ${v1.size}")
println(s"$name rows parsed: ${parsed}")
println(star)
result
}
}
}
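// Usage sketch (illustrative only): wrap an existing Anorm parser to trace how
// each row parses and how many rows were returned vs. parsed. `User`, the
// column names and the SQL text below are assumptions, not part of this file.
//
//   import anorm._, anorm.SqlParser._
//
//   val userParser: RowParser[User] = RowParserDebugger.group(
//     long("id") ~ str("name") map { case id ~ name => User(id, name) },
//     "user")
//
//   // The overridden combinators (`*`, `single`, ...) report row counts.
//   def allUsers()(implicit c: java.sql.Connection): List[User] =
//     SQL("select id, name from users").as(userParser.*)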
|
mhzajac/anorm-debugger
|
src/main/scala/com/jaroop/anorm/debug/ParserDebugger.scala
|
Scala
|
apache-2.0
| 2,053 |
package com.equalinformation.scala.programs.gui
import scala.swing._
import scala.swing.event.ButtonClicked
/**
* Created by bpupadhyaya on 6/23/16.
*/
object SwingAppButton2 extends SimpleSwingApplication {
override def top: Frame = new MainFrame {
title = "Swing App with Button - 2"
val button = new Button {
text = "Click here"
}
val label = new Label {
text = "No button clicked yet"
}
contents = new BoxPanel(Orientation.Vertical) {
contents += button
contents += label
border = Swing.EmptyBorder(30,40,15,40)
}
listenTo(button)
var nClicks = 0
reactions += {
case ButtonClicked(b) =>
nClicks += 1
label.text = "Number of button clicks: "+nClicks
}
}
}
|
bpupadhyaya/scala-programs-collection
|
scala-programs-collection/src/main/scala/com/equalinformation/scala/programs/gui/SwingAppButton2.scala
|
Scala
|
apache-2.0
| 768 |
/*
* Copyright 2017-2022 John Snow Labs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.johnsnowlabs.nlp.annotators.btm
import java.io.{ByteArrayInputStream, ObjectInputStream}
import com.johnsnowlabs.storage.{RocksDBConnection, StorageReader}
class TMNodesReader(
override val connection: RocksDBConnection,
override protected val caseSensitiveIndex: Boolean
) extends StorageReader[TrieNode] {
override def emptyValue: TrieNode = TrieNode(0, isLeaf = true, 0, 0)
def lookup(index: Int): TrieNode = {
super.lookup(index.toString).get
}
override def fromBytes(bytes: Array[Byte]): TrieNode = {
val ois = new ObjectInputStream(new ByteArrayInputStream(bytes))
val value = ois.readObject.asInstanceOf[TrieNode]
ois.close()
value
}
override protected def readCacheSize: Int = 50000
}
|
JohnSnowLabs/spark-nlp
|
src/main/scala/com/johnsnowlabs/nlp/annotators/btm/TMNodesReader.scala
|
Scala
|
apache-2.0
| 1,408 |
/*
* Copyright 2017 Zhang Di
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.dizhang.seqspark.ds
import Pedigree._
/**
* Created by zhangdi on 10/16/17.
*/
class Pedigree(val data: Array[Ped]) {
//val graph: Map[String, Map[String, List[String]]]
}
object Pedigree {
def apply(input: Seq[String]): Pedigree = {
val data = input.map{l =>
val s = l.split("\\t")
Ped(s(0), s(1), s(2), s(3), s(4).toInt, s(5).toInt)
}.toArray
new Pedigree(data)
}
case class Ped(fid: String, iid: String, pid: String, mid: String, sex: Int, aff: Int)
}
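// Illustrative example of the input expected by Pedigree.apply: one
// tab-separated line per individual with columns fid, iid, pid, mid, sex, aff
// (sex and affection status as integers). The family and sample ids below are
// made up.
//
//   val ped = Pedigree(Seq(
//     "fam1\tI-1\t0\t0\t1\t1",
//     "fam1\tI-2\t0\t0\t2\t1",
//     "fam1\tII-1\tI-1\tI-2\t1\t2"))
//   ped.data.length  // 3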
|
statgenetics/seqspark
|
src/main/scala/org/dizhang/seqspark/ds/Pedigree.scala
|
Scala
|
apache-2.0
| 1,099 |
package net.virtualvoid.sbt.graph.util
import org.specs2.mutable.Specification
class AsciiTreeLayoutSpecs extends Specification {
sealed trait Tree
case class Branch(left: Tree, right: Tree) extends Tree
case class Leaf(i: Int) extends Tree
def children(t: Tree): Seq[Tree] = t match {
case Branch(left, right) ⇒ Seq(left, right)
case _: Leaf ⇒ Nil
}
def display(t: Tree): String = t match {
case Branch(left, right) ⇒ "Branch"
case Leaf(value) ⇒ value.toString * value
}
"Graph" should {
"layout simple graphs" in {
val simple = Branch(Branch(Leaf(1), Leaf(2)), Leaf(3))
AsciiTreeLayout.toAscii(simple, children, display, 20) ===
"""Branch
| +-Branch
| | +-1
| | +-22
| |\\u0020
| +-333
| """.stripMargin
}
"add separator lines where applicable" in {
val simple = Branch(Branch(Leaf(1), Branch(Leaf(2), Leaf(3))), Leaf(4))
AsciiTreeLayout.toAscii(simple, children, display, 20) ===
"""Branch
| +-Branch
| | +-1
| | +-Branch
| | +-22
| | +-333
| |\\u0020\\u0020\\u0020
| +-4444
| """.stripMargin
}
"layout deep graphs" in {
val simple = Branch(Branch(Branch(Branch(Branch(Branch(Leaf(1), Leaf(1)), Leaf(1)), Leaf(1)), Leaf(2)), Leaf(3)), Leaf(4))
AsciiTreeLayout.toAscii(simple, children, display, 10) ===
"""Branch
| +-Branch
| | +-Br..
| | | +-..
| | | | ..
| | | | ..
| | | | ..
| | | | ..
| | | | | |\\u0020
| | | | ..
| | | | |\\u0020
| | | | ..
| | | |\\u0020
| | | +-22
| | |\\u0020
| | +-333
| |\\u0020
| +-4444
| """.stripMargin
}
"cut off cycles" in {
AsciiTreeLayout.toAscii[Int](1, Map(
1 -> Seq(2, 3, 4),
2 -> Seq(4, 5),
3 -> Seq(),
4 -> Seq(3),
5 -> Seq(1, 4, 6, 7),
6 -> Seq(),
7 -> Seq()), _.toString).trim ===
"""1
| +-2
| | +-4
| | | +-3
| | |\\u0020
| | +-5
| | #-1 (cycle)
| | +-4
| | | +-3
| | |\\u0020
| | +-6
| | +-7
| |\\u0020\\u0020\\u0020
| +-3
| +-4
| +-3""".stripMargin.trim
}
}
}
|
jrudolph/sbt-dependency-graph
|
src/test/scala/net/virtualvoid/sbt/graph/util/AsciiTreeLayoutSpecs.scala
|
Scala
|
apache-2.0
| 2,558 |
/**
* Copyright (C) 2011 LShift Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.lshift.diffa.kernel.differencing
import org.junit.Test
import org.junit.Assert._
import net.lshift.diffa.kernel.config.{SetCategoryDescriptor, RangeCategoryDescriptor, PrefixCategoryDescriptor}
import scala.collection.JavaConversions._
import net.lshift.diffa.adapter.scanning._
import org.joda.time.{DateTimeZone, LocalDate, DateTime}
class AttributesUtilTest {
val categories = Map(
"someString" -> new PrefixCategoryDescriptor(5, 1, 1),
"someDate" -> new RangeCategoryDescriptor("date", "2011-01-01", "2011-12-31"),
"someTime" -> new RangeCategoryDescriptor("datetime", "2011-01-01T10:15:15.000Z", "2011-01-05T11:16:16.000Z"),
"someInt" -> new RangeCategoryDescriptor("int", "5", "12"),
"someSet" -> new SetCategoryDescriptor(Set("a", "b", "z"))
)
val constraints = Seq(
new StringPrefixConstraint("someString", "abcdef"),
new DateRangeConstraint("someDate", new LocalDate(2011, 1, 1), new LocalDate(2011, 12, 31)),
new TimeRangeConstraint("someTime",
new DateTime(2011, 1, 1, 10, 15, 15, DateTimeZone.UTC), new DateTime(2011, 12, 31, 11, 16, 16, DateTimeZone.UTC)),
new IntegerRangeConstraint("someInt", 5, 12),
new SetConstraint("someSet", Set("a", "b", "z"))
)
val allAttributes = Map(
"someString" -> "abcdefg",
"someDate" -> "2011-05-01",
"someTime" -> "2011-01-02T09:11:21.000Z",
"someInt" -> "8",
"someSet" -> "b"
)
val allWrongAttributes = Map(
"someString" -> "gadadads",
"someDate" -> "2012-05-01",
"someTime" -> "2012-01-02T09:11:21.000Z",
"someInt" -> "42",
"someSet" -> "c"
)
@Test
def shouldAllowMissingAttributesOnToTypedMapOfUntypedMap() {
val typed = AttributesUtil.toTypedMap(categories, Map("someString" -> "aaa"))
assertEquals(Map("someString" -> StringAttribute("aaa")), typed)
}
@Test
def shouldNotDetectProblemsWhenThereAreNoIssues() {
val result = AttributesUtil.detectAttributeIssues(categories, constraints, allAttributes)
assertEquals(result, Map[String, String]())
}
@Test
def shouldComplainAboutTooManyAttributes() {
val result = AttributesUtil.detectAttributeIssues(categories, constraints, allAttributes ++ Map("extra" -> "abc"))
assertEquals(result, Map("extra" -> "no matching category defined"))
}
@Test
def shouldDetectMissingAttributes() {
val result = AttributesUtil.detectAttributeIssues(categories, constraints, allAttributes -- Seq("someString", "someInt"))
assertEquals(result, Map("someString" -> "property is missing", "someInt" -> "property is missing"))
}
@Test
def shouldDetectOutOfConstraintAttributes() {
val result = AttributesUtil.detectAttributeIssues(categories, constraints, allWrongAttributes)
assertEquals(result, Map(
"someString" -> "gadadads does not have the prefix abcdef",
"someDate" -> "2012-05-01 is not in range 2011-01-01 -> 2011-12-31",
"someTime" -> "2012-01-02T09:11:21.000Z is not in range 2011-01-01T10:15:15.000Z -> 2011-12-31T11:16:16.000Z",
"someInt" -> "42 is not in range 5 -> 12",
"someSet" -> "c is not a member of Set(a, b, z)"
))
}
}
|
lshift/diffa
|
kernel/src/test/scala/net/lshift/diffa/kernel/differencing/AttributesUtilTest.scala
|
Scala
|
apache-2.0
| 3,774 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenContext
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan, Range}
import org.apache.spark.sql.catalyst.rules.RuleExecutor
import org.apache.spark.sql.catalyst.util.GenericArrayData
import org.apache.spark.sql.types._
/**
 * SPARK-18601 discusses simplifying direct access to complex type creators.
* i.e. {{{create_named_struct(square, `x` * `x`).square}}} can be simplified to {{{`x` * `x`}}}.
* sam applies to create_array and create_map
*/
class ComplexTypesSuite extends PlanTest with ExpressionEvalHelper {
object Optimizer extends RuleExecutor[LogicalPlan] {
val batches =
Batch("collapse projections", FixedPoint(10),
CollapseProject) ::
Batch("Constant Folding", FixedPoint(10),
NullPropagation,
ConstantFolding,
BooleanSimplification,
SimplifyConditionals,
SimplifyBinaryComparison,
SimplifyExtractValueOps) :: Nil
}
private val idAtt = ('id).long.notNull
private val nullableIdAtt = ('nullable_id).long
private val relation = LocalRelation(idAtt, nullableIdAtt)
private val testRelation = LocalRelation('a.int, 'b.int, 'c.int, 'd.double, 'e.int)
private def checkRule(originalQuery: LogicalPlan, correctAnswer: LogicalPlan) = {
val optimized = Optimizer.execute(originalQuery.analyze)
assert(optimized.resolved, "optimized plans must be still resolvable")
comparePlans(optimized, correctAnswer.analyze)
}
test("explicit get from namedStruct") {
val query = relation
.select(
GetStructField(
CreateNamedStruct(Seq("att", 'id )),
0,
None) as "outerAtt")
val expected = relation.select('id as "outerAtt")
checkRule(query, expected)
}
test("explicit get from named_struct- expression maintains original deduced alias") {
val query = relation
.select(GetStructField(CreateNamedStruct(Seq("att", 'id)), 0, None))
val expected = relation
.select('id as "named_struct(att, id).att")
checkRule(query, expected)
}
test("collapsed getStructField ontop of namedStruct") {
val query = relation
.select(CreateNamedStruct(Seq("att", 'id)) as "struct1")
.select(GetStructField('struct1, 0, None) as "struct1Att")
val expected = relation.select('id as "struct1Att")
checkRule(query, expected)
}
test("collapse multiple CreateNamedStruct/GetStructField pairs") {
val query = relation
.select(
CreateNamedStruct(Seq(
"att1", 'id,
"att2", 'id * 'id)) as "struct1")
.select(
GetStructField('struct1, 0, None) as "struct1Att1",
GetStructField('struct1, 1, None) as "struct1Att2")
val expected =
relation.
select(
'id as "struct1Att1",
('id * 'id) as "struct1Att2")
checkRule(query, expected)
}
test("collapsed2 - deduced names") {
val query = relation
.select(
CreateNamedStruct(Seq(
"att1", 'id,
"att2", 'id * 'id)) as "struct1")
.select(
GetStructField('struct1, 0, None),
GetStructField('struct1, 1, None))
val expected =
relation.
select(
'id as "struct1.att1",
('id * 'id) as "struct1.att2")
checkRule(query, expected)
}
test("simplified array ops") {
val rel = relation.select(
CreateArray(Seq(
CreateNamedStruct(Seq(
"att1", 'id,
"att2", 'id * 'id)),
CreateNamedStruct(Seq(
"att1", 'id + 1,
"att2", ('id + 1) * ('id + 1))
))
) as "arr"
)
val query = rel
.select(
GetArrayStructFields('arr, StructField("att1", LongType, false), 0, 1, false) as "a1",
GetArrayItem('arr, 1) as "a2",
GetStructField(GetArrayItem('arr, 1), 0, None) as "a3",
GetArrayItem(
GetArrayStructFields('arr,
StructField("att1", LongType, false),
0,
1,
false),
1) as "a4")
val expected = relation
.select(
CreateArray(Seq('id, 'id + 1L)) as "a1",
CreateNamedStruct(Seq(
"att1", ('id + 1L),
"att2", (('id + 1L) * ('id + 1L)))) as "a2",
('id + 1L) as "a3",
('id + 1L) as "a4")
checkRule(query, expected)
}
test("SPARK-22570: CreateArray should not create a lot of global variables") {
val ctx = new CodegenContext
CreateArray(Seq(Literal(1))).genCode(ctx)
assert(ctx.inlinedMutableStates.length == 0)
}
test("SPARK-23208: Test code splitting for create array related methods") {
val inputs = (1 to 2500).map(x => Literal(s"l_$x"))
checkEvaluation(CreateArray(inputs), new GenericArrayData(inputs.map(_.eval())))
}
test("simplify map ops") {
val rel = relation
.select(
CreateMap(Seq(
"r1", CreateNamedStruct(Seq("att1", 'id)),
"r2", CreateNamedStruct(Seq("att1", ('id + 1L))))) as "m")
val query = rel
.select(
GetMapValue('m, "r1") as "a1",
GetStructField(GetMapValue('m, "r1"), 0, None) as "a2",
GetMapValue('m, "r32") as "a3",
GetStructField(GetMapValue('m, "r32"), 0, None) as "a4")
val expected =
relation.select(
CreateNamedStruct(Seq("att1", 'id)) as "a1",
'id as "a2",
Literal.create(
null,
StructType(
StructField("att1", LongType, nullable = false) :: Nil
)
) as "a3",
Literal.create(null, LongType) as "a4")
checkRule(query, expected)
}
test("simplify map ops, constant lookup, dynamic keys") {
val query = relation.select(
GetMapValue(
CreateMap(Seq(
'id, ('id + 1L),
('id + 1L), ('id + 2L),
('id + 2L), ('id + 3L),
Literal(13L), 'id,
('id + 3L), ('id + 4L),
('id + 4L), ('id + 5L))),
13L) as "a")
val expected = relation
.select(
CaseWhen(Seq(
(EqualTo(13L, 'id), ('id + 1L)),
(EqualTo(13L, ('id + 1L)), ('id + 2L)),
(EqualTo(13L, ('id + 2L)), ('id + 3L)),
(Literal(true), 'id))) as "a")
checkRule(query, expected)
}
test("simplify map ops, dynamic lookup, dynamic keys, lookup is equivalent to one of the keys") {
val query = relation
.select(
GetMapValue(
CreateMap(Seq(
'id, ('id + 1L),
('id + 1L), ('id + 2L),
('id + 2L), ('id + 3L),
('id + 3L), ('id + 4L),
('id + 4L), ('id + 5L))),
('id + 3L)) as "a")
val expected = relation
.select(
CaseWhen(Seq(
(EqualTo('id + 3L, 'id), ('id + 1L)),
(EqualTo('id + 3L, ('id + 1L)), ('id + 2L)),
(EqualTo('id + 3L, ('id + 2L)), ('id + 3L)),
(Literal(true), ('id + 4L)))) as "a")
checkRule(query, expected)
}
test("simplify map ops, no positive match") {
val rel = relation
.select(
GetMapValue(
CreateMap(Seq(
'id, ('id + 1L),
('id + 1L), ('id + 2L),
('id + 2L), ('id + 3L),
('id + 3L), ('id + 4L),
('id + 4L), ('id + 5L))),
'id + 30L) as "a")
val expected = relation.select(
CaseWhen(Seq(
(EqualTo('id + 30L, 'id), ('id + 1L)),
(EqualTo('id + 30L, ('id + 1L)), ('id + 2L)),
(EqualTo('id + 30L, ('id + 2L)), ('id + 3L)),
(EqualTo('id + 30L, ('id + 3L)), ('id + 4L)),
(EqualTo('id + 30L, ('id + 4L)), ('id + 5L)))) as "a")
checkRule(rel, expected)
}
test("simplify map ops, constant lookup, mixed keys, eliminated constants") {
val rel = relation
.select(
GetMapValue(
CreateMap(Seq(
'id, ('id + 1L),
('id + 1L), ('id + 2L),
('id + 2L), ('id + 3L),
Literal(14L), 'id,
('id + 3L), ('id + 4L),
('id + 4L), ('id + 5L))),
13L) as "a")
val expected = relation
.select(
CaseKeyWhen(13L,
Seq('id, ('id + 1L),
('id + 1L), ('id + 2L),
('id + 2L), ('id + 3L),
('id + 3L), ('id + 4L),
('id + 4L), ('id + 5L))) as "a")
checkRule(rel, expected)
}
test("simplify map ops, potential dynamic match with null value + an absolute constant match") {
val rel = relation
.select(
GetMapValue(
CreateMap(Seq(
'id, ('id + 1L),
('id + 1L), ('id + 2L),
('id + 2L), Literal.create(null, LongType),
Literal(2L), 'id,
('id + 3L), ('id + 4L),
('id + 4L), ('id + 5L))),
2L ) as "a")
val expected = relation
.select(
CaseWhen(Seq(
(EqualTo(2L, 'id), ('id + 1L)),
// these two are possible matches, we can't tell until runtime
(EqualTo(2L, ('id + 1L)), ('id + 2L)),
(EqualTo(2L, 'id + 2L), Literal.create(null, LongType)),
// this is a definite match (two constants),
// but it cannot override a potential match with ('id + 2L),
// which is exactly what [[Coalesce]] would do in this case.
(Literal.TrueLiteral, 'id))) as "a")
checkRule(rel, expected)
}
test("SPARK-23500: Simplify array ops that are not at the top node") {
val query = LocalRelation('id.long)
.select(
CreateArray(Seq(
CreateNamedStruct(Seq(
"att1", 'id,
"att2", 'id * 'id)),
CreateNamedStruct(Seq(
"att1", 'id + 1,
"att2", ('id + 1) * ('id + 1))
))
) as "arr")
.select(
GetStructField(GetArrayItem('arr, 1), 0, None) as "a1",
GetArrayItem(
GetArrayStructFields('arr,
StructField("att1", LongType, nullable = false),
ordinal = 0,
numFields = 1,
containsNull = false),
ordinal = 1) as "a2")
.orderBy('id.asc)
val expected = LocalRelation('id.long)
.select(
('id + 1L) as "a1",
('id + 1L) as "a2")
.orderBy('id.asc)
checkRule(query, expected)
}
test("SPARK-23500: Simplify map ops that are not top nodes") {
val query =
LocalRelation('id.long)
.select(
CreateMap(Seq(
"r1", 'id,
"r2", 'id + 1L)) as "m")
.select(
GetMapValue('m, "r1") as "a1",
GetMapValue('m, "r32") as "a2")
.orderBy('id.asc)
.select('a1, 'a2)
val expected =
LocalRelation('id.long).select(
'id as "a1",
Literal.create(null, LongType) as "a2")
.orderBy('id.asc)
checkRule(query, expected)
}
test("SPARK-23500: Simplify complex ops that aren't at the plan root") {
val structRel = relation
.select(GetStructField(CreateNamedStruct(Seq("att1", 'nullable_id)), 0, None) as "foo")
.groupBy($"foo")("1")
val structExpected = relation
.select('nullable_id as "foo")
.groupBy($"foo")("1")
checkRule(structRel, structExpected)
val arrayRel = relation
.select(GetArrayItem(CreateArray(Seq('nullable_id, 'nullable_id + 1L)), 0) as "a1")
.groupBy($"a1")("1")
val arrayExpected = relation.select('nullable_id as "a1").groupBy($"a1")("1")
checkRule(arrayRel, arrayExpected)
val mapRel = relation
.select(GetMapValue(CreateMap(Seq("id", 'nullable_id)), "id") as "m1")
.groupBy($"m1")("1")
val mapExpected = relation
.select('nullable_id as "m1")
.groupBy($"m1")("1")
checkRule(mapRel, mapExpected)
}
test("SPARK-23500: Ensure that aggregation expressions are not simplified") {
// Make sure that aggregation exprs are correctly ignored. Maps can't be used in
// grouping exprs so aren't tested here.
val structAggRel = relation.groupBy(
CreateNamedStruct(Seq("att1", 'nullable_id)))(
GetStructField(CreateNamedStruct(Seq("att1", 'nullable_id)), 0, None))
checkRule(structAggRel, structAggRel)
val arrayAggRel = relation.groupBy(
CreateArray(Seq('nullable_id)))(GetArrayItem(CreateArray(Seq('nullable_id)), 0))
checkRule(arrayAggRel, arrayAggRel)
    // This could be done if we had a more complex rule that checks that
    // the CreateMap does not come from the key.
val originalQuery = relation
.groupBy('id)(
GetMapValue(CreateMap(Seq('id, 'id + 1L)), 0L) as "a"
)
checkRule(originalQuery, originalQuery)
}
test("SPARK-23500: namedStruct and getField in the same Project #1") {
val originalQuery =
testRelation
.select(
namedStruct("col1", 'b, "col2", 'c).as("s1"), 'a, 'b)
.select('s1 getField "col2" as 's1Col2,
namedStruct("col1", 'a, "col2", 'b).as("s2"))
.select('s1Col2, 's2 getField "col2" as 's2Col2)
val correctAnswer =
testRelation
.select('c as 's1Col2, 'b as 's2Col2)
checkRule(originalQuery, correctAnswer)
}
test("SPARK-23500: namedStruct and getField in the same Project #2") {
val originalQuery =
testRelation
.select(
namedStruct("col1", 'b, "col2", 'c) getField "col2" as 'sCol2,
namedStruct("col1", 'a, "col2", 'c) getField "col1" as 'sCol1)
val correctAnswer =
testRelation
.select('c as 'sCol2, 'a as 'sCol1)
checkRule(originalQuery, correctAnswer)
}
test("SPARK-24313: support binary type as map keys in GetMapValue") {
val mb0 = Literal.create(
Map(Array[Byte](1, 2) -> "1", Array[Byte](3, 4) -> null, Array[Byte](2, 1) -> "2"),
MapType(BinaryType, StringType))
val mb1 = Literal.create(Map[Array[Byte], String](), MapType(BinaryType, StringType))
checkEvaluation(GetMapValue(mb0, Literal(Array[Byte](1, 2, 3))), null)
checkEvaluation(GetMapValue(mb1, Literal(Array[Byte](1, 2))), null)
checkEvaluation(GetMapValue(mb0, Literal(Array[Byte](2, 1), BinaryType)), "2")
checkEvaluation(GetMapValue(mb0, Literal(Array[Byte](3, 4))), null)
}
private val structAttr = 'struct1.struct('a.int)
private val testStructRelation = LocalRelation(structAttr)
test("simplify GetStructField on WithFields that is not changing the attribute being extracted") {
val query = testStructRelation.select(
GetStructField(WithFields('struct1, Seq("b"), Seq(Literal(1))), 0, Some("a")) as "outerAtt")
val expected = testStructRelation.select(GetStructField('struct1, 0, Some("a")) as "outerAtt")
checkRule(query, expected)
}
test("simplify GetStructField on WithFields that is changing the attribute being extracted") {
val query = testStructRelation.select(
GetStructField(WithFields('struct1, Seq("b"), Seq(Literal(1))), 1, Some("b")) as "outerAtt")
val expected = testStructRelation.select(Literal(1) as "outerAtt")
checkRule(query, expected)
}
test(
"simplify GetStructField on WithFields that is changing the attribute being extracted twice") {
val query = testStructRelation
.select(GetStructField(WithFields('struct1, Seq("b", "b"), Seq(Literal(1), Literal(2))), 1,
Some("b")) as "outerAtt")
val expected = testStructRelation.select(Literal(2) as "outerAtt")
checkRule(query, expected)
}
test("collapse multiple GetStructField on the same WithFields") {
val query = testStructRelation
.select(WithFields('struct1, Seq("b"), Seq(Literal(2))) as "struct2")
.select(
GetStructField('struct2, 0, Some("a")) as "struct1A",
GetStructField('struct2, 1, Some("b")) as "struct1B")
val expected = testStructRelation.select(
GetStructField('struct1, 0, Some("a")) as "struct1A",
Literal(2) as "struct1B")
checkRule(query, expected)
}
test("collapse multiple GetStructField on different WithFields") {
val query = testStructRelation
.select(
WithFields('struct1, Seq("b"), Seq(Literal(2))) as "struct2",
WithFields('struct1, Seq("b"), Seq(Literal(3))) as "struct3")
.select(
GetStructField('struct2, 0, Some("a")) as "struct2A",
GetStructField('struct2, 1, Some("b")) as "struct2B",
GetStructField('struct3, 0, Some("a")) as "struct3A",
GetStructField('struct3, 1, Some("b")) as "struct3B")
val expected = testStructRelation
.select(
GetStructField('struct1, 0, Some("a")) as "struct2A",
Literal(2) as "struct2B",
GetStructField('struct1, 0, Some("a")) as "struct3A",
Literal(3) as "struct3B")
checkRule(query, expected)
}
}
|
dbtsai/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/complexTypesSuite.scala
|
Scala
|
apache-2.0
| 17,712 |
package org.openmole.core.workflow
import org.openmole.core.expansion.FromContext
import org.openmole.core.fileservice.FileService
import org.openmole.core.workspace.TmpDirectory
package object validation {
trait ValidationPackage
}
|
openmole/openmole
|
openmole/core/org.openmole.core.workflow/src/main/scala/org/openmole/core/workflow/validation/package.scala
|
Scala
|
agpl-3.0
| 239 |
package pgep.Functions
object BoolFunctions {
val and = Func(Symbol("&"), List(classOf[Boolean], classOf[Boolean]), classOf[Boolean],
(ns: Seq[Any]) => (ns(0), ns(1)) match {
case (n1: Boolean, n2: Boolean) => n1 & n2},
(ns: Seq[String]) => (ns(0), ns(1)) match {
case (s1, s2) => "("+ s1 +" & "+ s2 +")"})
val or = Func(Symbol("|"), List(classOf[Boolean], classOf[Boolean]), classOf[Boolean],
(ns: Seq[Any]) => (ns(0), ns(1)) match {
case (n1: Boolean, n2: Boolean) => n1 | n2},
(ns: Seq[String]) => (ns(0), ns(1)) match {
case (s1, s2) => "("+ s1 +" | "+ s2 +")"})
val not = Func(Symbol("Not"), List(classOf[Boolean]), classOf[Boolean],
(ns: Seq[Any]) => ns(0) match {
case n: Boolean => !n},
(ns: Seq[String]) => ns(0) match {
case s => "Not("+ s +")"})
}
|
khernyo/PGEP
|
src/pgep/Functions/BoolFunctions.scala
|
Scala
|
gpl-3.0
| 982 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
/**
* Support for running Spark SQL queries using functionality from Apache Hive (does not require an
* existing Hive installation). Supported Hive features include:
* - Using HiveQL to express queries.
* - Reading metadata from the Hive Metastore using HiveSerDes.
* - Hive UDFs, UDAs, UDTs
*/
package object hive
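// A minimal usage sketch of the support described above (illustrative only;
// assumes `import org.apache.spark.sql.SparkSession` and a Hive-compatible
// warehouse; the table and query are made-up examples):
//
//   val spark = SparkSession.builder()
//     .enableHiveSupport()
//     .getOrCreate()
//   spark.sql("CREATE TABLE IF NOT EXISTS src (key INT, value STRING)")
//   spark.sql("SELECT key, value FROM src").show()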
|
pgandhi999/spark
|
sql/hive/src/main/scala/org/apache/spark/sql/hive/package.scala
|
Scala
|
apache-2.0
| 1,157 |
/*
* Copyright (c) 2014-2017 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0, and
* you may not use this file except in compliance with the Apache License
* Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the Apache License Version 2.0 is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.collectors.scalastream
import scala.concurrent.duration._
import model._
object TestUtils {
val testConf = CollectorConfig(
interface = "0.0.0.0",
port = 8080,
p3p = P3PConfig("/w3c/p3p.xml", "NOI DSP COR NID PSA OUR IND COM NAV STA"),
cookie = CookieConfig(true, "sp", 365.days, None),
cookieBounce = CookieBounceConfig(false, "bounce", "new-nuid"),
sink = "stdout",
streams = StreamsConfig(
good = "good",
bad = "bad",
useIpAddressAsPartitionKey = false,
kinesis = KinesisConfig(
region = "us-east-1",
threadPoolSize = 12,
aws = AWSConfig("cpf", "cpf"),
backoffPolicy = BackoffPolicyConfig(3000, 60000)
),
kafka = KafkaConfig("localhost:9092", 1),
buffer = BufferConfig(4000000, 500, 60000)
)
)
}
|
sspinc/snowplow
|
2-collectors/scala-stream-collector/src/test/scala/com.snowplowanalytics.snowplow.collectors.scalastream/TestUtils.scala
|
Scala
|
apache-2.0
| 1,563 |
package korolev.blazeServer
import org.http4s.blaze.channel.ServerChannel
import org.http4s.blaze.http.HttpService
import slogging._
import scala.concurrent.ExecutionContextExecutorService
/**
* @author Aleksey Fomkin <[email protected]>
*/
abstract class KorolevBlazeServer(config: BlazeServerConfig = BlazeServerConfig.default)(
implicit executionContext: ExecutionContextExecutorService
) {
def service: HttpService
def start(): ServerChannel = {
// activate SLF4J backend
//LoggerConfig.factory = SLF4JLoggerFactory()
runServer(service, config.copy(doNotBlockCurrentThread = true))
}
/*def main(args: Array[String]): Unit = {
// activate SLF4J backend
LoggerConfig.factory = SLF4JLoggerFactory()
val escapedConfig = Option(config).getOrElse(BlazeServerConfig())
runServer(service, escapedConfig)
}*/
}
|
PhilAndrew/JumpMicro
|
JMCloner/src/main/scala/korolev/blazeServer/KorolevBlazeServer.scala
|
Scala
|
mit
| 861 |
package org.scalatest.examples.funspec
import org.scalatest.FunSpec
class SetSpec extends FunSpec {
describe("A Set") {
describe("when empty") {
it("should have size 0") {
assert(Set.empty.size === 0)
}
it("should produce NoSuchElementException when head is invoked") {
intercept[NoSuchElementException] {
Set.empty.head
}
}
}
}
}
|
hubertp/scalatest
|
examples/src/main/scala/org/scalatest/examples/funspec/SetSpec.scala
|
Scala
|
apache-2.0
| 409 |
package org.zalando.nakadi.client.scala.model
import org.scalatest.Matchers
import org.scalatest.WordSpec
import com.fasterxml.jackson.annotation.JsonProperty
import com.fasterxml.jackson.annotation.JsonIgnore
import com.fasterxml.jackson.databind.DeserializationFeature
import org.slf4j.Logger
import org.slf4j.LoggerFactory
import com.fasterxml.jackson.core.`type`.TypeReference
class ScalaJacksonJsonMarshallerTest extends WordSpec with Matchers {
private val log = LoggerFactory.getLogger(this.getClass());
val mapper = ScalaJacksonJsonMarshaller.defaultObjectMapper
private def toJson[T](in: T): String = {
log.info("ToJson - in {}", in.toString());
val result = ScalaJacksonJsonMarshaller.serializer.to(in);
log.info("ToJson - out {}", result);
result
}
private def toObject[T](json: String, expectedType: TypeReference[T]): T = {
log.info("toObject - in {}", json);
val result = ScalaJacksonJsonMarshaller.deserializer(expectedType).from(json);
log.info("toObject - out {}", result.toString());
result
}
private def testMarshallingUnmarshalling[T](in: T, expectedType: TypeReference[T]): Unit = {
val json = toJson(in)
val out = toObject(json, expectedType)
in shouldBe out
}
"Serialize/Deserialize BatchItemResponse" in {
val in = ModelFactory.newBatchItemResponse
val expectedType = ScalaJacksonJsonMarshaller.batchItemResponseTR
testMarshallingUnmarshalling(in, expectedType)
}
"Serialize/Deserialize BusinessEvent" in {
val in = ModelFactory.newBusinessEvent
val expectedType = new TypeReference[BusinessEventImpl] {}
testMarshallingUnmarshalling(in, expectedType)
}
"Serialize/Deserialize Cursor" in {
val in = ModelFactory.newCursor
val expectedType = ScalaJacksonJsonMarshaller.cursorTR
testMarshallingUnmarshalling(in, expectedType)
}
"Serialize/Deserialize DataChangeEvent" in {
val event = ModelFactory.newSimpleEvent
val in = ModelFactory.newDataChangeEvent(event)
val expectedType = new TypeReference[DataChangeEvent[SimpleEvent]] {}
testMarshallingUnmarshalling(in, expectedType)
}
"Serialize/Deserialize EventMetadata" in {
val in = ModelFactory.newEventMetadata()
val expectedType = ScalaJacksonJsonMarshaller.eventMetadataTR
testMarshallingUnmarshalling(in, expectedType)
}
"Serialize/Deserialize EventStreamBatch" in {
val events = List(ModelFactory.newSimpleEvent(), ModelFactory.newSimpleEvent(), ModelFactory.newSimpleEvent())
val cursor = ModelFactory.newCursor()
val in = ModelFactory.newEventStreamBatch(events, cursor)
val expectedType = new TypeReference[EventStreamBatch[SimpleEvent]] {}
testMarshallingUnmarshalling(in, expectedType)
}
"Serialize/Deserialize EventTypeSchema" in {
val in = ModelFactory.newEventTypeSchema()
val expectedType = ScalaJacksonJsonMarshaller.eventTypeSchemaTR
testMarshallingUnmarshalling(in, expectedType)
}
"Serialize/Deserialize EventTypeStatistics" in {
val in = ModelFactory.newEventTypeStatistics()
val expectedType = ScalaJacksonJsonMarshaller.eventTypeStatisticsTR
testMarshallingUnmarshalling(in, expectedType)
}
"Serialize/Deserialize Metrics" in {
val in = ModelFactory.newMetrics()
val expectedType = ScalaJacksonJsonMarshaller.metricsTR
val json = toJson(in)
val out = toObject(json, expectedType)
val jsonOut = toJson(out)
json shouldBe jsonOut
}
"Serialize/Deserialize Partition" in {
val in = ModelFactory.newPartition()
val expectedType = ScalaJacksonJsonMarshaller.partitionTR
testMarshallingUnmarshalling(in, expectedType)
}
"Serialize/Deserialize Problem" in {
val in = ModelFactory.newProblem()
val expectedType = ScalaJacksonJsonMarshaller.problemTR
testMarshallingUnmarshalling(in, expectedType)
}
"Serialize/Deserialize PartitionStrategy" in {
val in = ModelFactory.newPartitionStrategy()
val expectedType = ScalaJacksonJsonMarshaller.partitionResolutionStrategyTR
testMarshallingUnmarshalling(in, expectedType)
}
"Serialize/Deserialize EventEnrichmentStrategy" in {
val in = ModelFactory.newEventEnrichmentStrategy()
val expectedType = ScalaJacksonJsonMarshaller.eventEnrichmentStrategyTR
testMarshallingUnmarshalling(in, expectedType)
}
"Serialize/Deserialize EventTypeCategory" in {
val in = ModelFactory.newEventTypeCategory()
val expectedType = ScalaJacksonJsonMarshaller.eventTypeCategoryTR
testMarshallingUnmarshalling(in, expectedType)
}
"Serialize/Deserialize EventType" in {
val in = ModelFactory.newEventType()
val expectedType = ScalaJacksonJsonMarshaller.eventTypeTR
testMarshallingUnmarshalling(in, expectedType)
}
"Serialize/Deserialize CompatibilityMode" in {
val in = ModelFactory.newCompatibilityMode()
val expectedType = ScalaJacksonJsonMarshaller.compatibilityModeTR
testMarshallingUnmarshalling(in, expectedType)
}
}
|
zalando/nakadi-klients
|
client/src/test/scala/org/zalando/nakadi/client/scala/model/ScalaJacksonJsonMarshallerTest.scala
|
Scala
|
mit
| 4,976 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.contrib
import slamdata.Predef.{Eq => _, _}
import _root_.cats.{Eq, Functor, Traverse}
import _root_.iota.{CopK, TListK}
import higherkindness.droste.Delay
package object iotac {
type ACopK[α] = CopK[_, α]
type :<<:[F[_], G[α] <: ACopK[α]] = CopK.Inject[F, G]
@SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
def computeListK[LL <: TListK, RR <: TListK, A](inp: CopK[LL, A])(
implicit compute: TListK.Compute.Aux[LL, RR])
: CopK[RR, A] = {
val _ = compute
inp.asInstanceOf[CopK[RR, A]]
}
def mkInject[F[_], LL <: TListK](i: Int): CopK.Inject[F, CopK[LL, ?]] =
CopK.Inject.injectFromInjectL[F, LL](
CopK.InjectL.makeInjectL[F, LL](
new TListK.Pos[LL, F] { val index = i }))
implicit def copkFunctor[LL <: TListK](implicit M: FunctorMaterializer[LL]): Functor[CopK[LL, ?]] =
M.materialize(offset = 0)
implicit def copkDelayEq[LL <: TListK](implicit M: DelayEqMaterializer[LL]): Delay[Eq, CopK[LL, ?]] =
M.materialize(offset = 0)
implicit def copkTraverse[LL <: TListK](implicit M: TraverseMaterializer[LL]): Traverse[CopK[LL, ?]] =
M.materialize(offset = 0)
}
|
quasar-analytics/quasar
|
foundation/src/main/scala/quasar/contrib/iotac/package.scala
|
Scala
|
apache-2.0
| 1,761 |
package org.fathens.colorworks.config.dsl
import scala.io.Source
import scala.util.Random
import scala.tools.nsc.{Global, Settings}
import scala.tools.nsc.interpreter.AbstractFileClassLoader
import scala.tools.nsc.io.VirtualDirectory
import scala.tools.nsc.reporters.ConsoleReporter
import scala.tools.nsc.util.BatchSourceFile
import java.io.File
class DynamicCompiler {
private val SOURCE_ENCODING: String = "UTF-8"
private val virtualDirectory: VirtualDirectory = new VirtualDirectory("[memory]", None)
private val scalaCompilerPath: List[String] = jarPathOfClass("scala.tools.nsc.Global")
private val scalaLibraryPath: List[String] = jarPathOfClass("scala.ScalaObject")
private val bootClassPath = scalaCompilerPath ::: scalaLibraryPath
private val settings: Settings = new Settings
  settings.deprecation.value = true // enable deprecation warnings
  settings.unchecked.value = true // enable unchecked warnings
  settings.outputDirs.setSingleOutput(virtualDirectory) // send compiler output to the in-memory directory
settings.bootclasspath.value = bootClassPath mkString (File.pathSeparator)
  private val global: Global = new Global(settings, new ConsoleReporter(settings)) // the reporter prints to the console
private val classLoader: AbstractFileClassLoader =
new AbstractFileClassLoader(virtualDirectory, getClass.getClassLoader)
  // root is the in-memory directory; the ClassLoader that loaded this class is used as the parent ClassLoader
def compileClassFromFile(sourcePath: String, className: String): Option[Class[_]] = {
val source = Source.fromFile(sourcePath, SOURCE_ENCODING)
try {
compileClass(className, source.mkString, sourcePath)
} finally {
if (source != null) source.close()
}
}
def compileClass(className: String, source: String, sourcePath: String = "[dynamic compiler]"): Option[Class[_]] = try {
val compiler = new global.Run
compiler.compileSources(List(new BatchSourceFile(sourcePath, source)))
Some(classLoader.findClass(className))
} catch {
case th: Throwable =>
th.printStackTrace()
None
}
def runScriptFromFile(sourcePath: String): Unit = {
val source = Source.fromFile(sourcePath, SOURCE_ENCODING)
try {
runScript(source.mkString)
} finally {
if (source != null) source.close()
}
}
def runScript(source: String): Unit = try {
val scriptClassName = wrapScriptClassName
val wrappedSource = wrapScript(source, scriptClassName)
compileClass(scriptClassName, wrappedSource) foreach { clazz =>
clazz.newInstance.asInstanceOf[() => Any].apply()
}
} catch {
case th: Throwable => th.printStackTrace()
} finally {
virtualDirectory.clear
}
private def wrapScriptClassName: String = {
val random = new Random
"WrappedScript_" + random.nextInt(Integer.MAX_VALUE)
}
private def wrapScript(code: String, className: String): String = {
"""|class %s extends (() => Any) {
| def apply() = {
| %s
| }
|}
|""".stripMargin.format(className, code)
}
private def jarPathOfClass(className: String): List[String] = {
val resource = className.split('.').mkString("/", "/", ".class")
val path = getClass.getResource(resource).getPath
val indexOfFileScheme = path.indexOf("file:") + 5
val indexOfSeparator = path.lastIndexOf('!')
List(path.substring(indexOfFileScheme, indexOfSeparator))
}
}
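// Usage sketch (added for illustration, not part of the original file; the snippet string is
// hypothetical): compile and run an arbitrary Scala expression entirely in memory.
//
//   val compiler = new DynamicCompiler
//   compiler.runScript("""println("hello from dynamically compiled code")""")
//
// runScript wraps the snippet in a generated `() => Any` class, compiles it into the
// VirtualDirectory, loads it through the AbstractFileClassLoader and invokes apply().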
|
sawatani/ColorWorks
|
src/main/scala/org/fathens/colorworks/config/dsl/DynamicCompiler.scala
|
Scala
|
mit
| 3,453 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author Jerry Shi, John Miller
* @version 1.2
* @date Wed Jan 9 15:07:13 EST 2013
* @see LICENSE (MIT style license file).
* @see http://en.wikipedia.org/wiki/C4.5_algorithm
*/
package scalation.analytics
import collection.mutable.{MutableList, Queue}
import math.{ceil, floor}
import util.control.Breaks._
import util.Sorting
import scalation.linalgebra.{MatrixI, VectorD, VectorI}
import scalation.util.Error
import Probability.entropy
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `DecisionTreeC45` class implements a Decision Tree classifier using the
* C4.5 algorithm. The classifier is trained using a data matrix 'x' and a
* classification vector 'y'. Each data vector in the matrix is classified into
* one of 'k' classes numbered '0, ..., k-1'. Each column in the matrix represents
* a feature (e.g., Humidity). The 'vc' array gives the number of distinct values
* per feature (e.g., 2 for Humidity).
* @param x the data vectors stored as rows of a matrix
* @param y the class array, where y_i = class for row i of the matrix x
* @param fn the names for all features/variables
* @param isCont boolean value to indicate whether according feature is continuous
* @param k the number of classes
* @param cn the names for all classes
* @param vc the value count array indicating number of distinct values per feature
*/
class DecisionTreeC45 (val x: MatrixI, val y: VectorI, fn: Array [String], isCont: Array [Boolean],
k: Int, cn: Array [String], private var vc: VectorI = null)
extends ClassifierInt (x, y, fn, k, cn)
{
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Class that contains information for a tree node.
* @param f feature of the node, if it is leaf, contains the feature of its parent
* @param value branch value
     * @param threshold threshold for a continuous feature
     * @param leaf boolean value indicating whether this is a leaf node
* @param decision decision if it is leaf node
*/
class Node (val f: Int, var value: Int, val threshold: Double = -1,
val leaf: Boolean = false, val decision: Int = -1)
{
var next = new MutableList [DecisionTreeC45#Node] ()
override def toString (): String =
{
if (leaf) {
"Node (LeafOf: " + fn(f) + "\\t" + "BranchValue: " + value + "\\tclass: " + decision + ")"
} else if (isCont (f)) {
if (value == -1) "Node (feature: " + fn(f) + "\\t BranchValue: ROOT" + "\\tThreshold: " + threshold + ")"
else "Node (feature: " + fn(f) + "\\t BranchValue: " + value + "\\tThreshold: " + threshold + ")"
} else {
if (value == -1) "Node (feature: " + fn(f) + "\\t BranchValue: ROOT" + ")"
else "Node (feature: " + fn(f) + "\\t BranchValue: " + value + ")"
} // if
} // toString
} // Node class
private val DEBUG = true // debug flag
private val y_prob = new VectorD (k) // probability that class c occurs
    if (vc == null) vc = vc_default // set value count (vc) to default for binary data (2)
for (i <- 0 until m) y_prob(y(i)) += 1
y_prob /= md
private val entropy_0 = entropy (y_prob) // the initial entropy
private var root: DecisionTreeC45#Node = null // decision tree, store according to layers of tree
private var threshold = new Array [Double] (n) // threshold for continuous features (below <=, above >)
for (i <- 0 until n if isCont(i)) vc(i) = 2 // for continuous features set vc to 2 (below, above)
if (DEBUG) println ("Constructing a C45 Decision Tree: initial entropy = " + entropy_0)
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Given a feature column (e.g., 2 (Humidity)) and a value (e.g., 1 (High))
     * use the frequency of occurrence of the value for each classification
* (e.g., 0 (no), 1 (yes)) to estimate k probabilities. Also, determine
* the fraction of training cases where the feature has this value
* (e.g., fraction where Humidity is High = 7/14).
* @param fCol a feature column to consider (e.g., Humidity)
* @param value one of the possible values for this feature (e.g., 1 (High))
     * @param cont indicates whether a continuous feature is being calculated
* @param thres threshold for continuous feature
*/
def frequency (fCol: VectorI, value: Int, cont: Boolean = false, thres: Double = 0):
Tuple2 [Double, VectorD] =
{
val prob = new VectorD (k) // probability vector for a given feature and value
var count = 0.0
if (cont) { // feature with continuous values
if (value == 0) {
for (i <- 0 until m if fCol(i) <= thres) { // below threshold
count += 1.0
prob(y(i)) += 1
} // for
} else {
for (i <- 0 until m if fCol(i) > thres) { // above threshold
count += 1.0
prob(y(i)) += 1
} // for
} // if
} else { // feature with discrete values
for (i <- 0 until m if fCol(i) == value) {
count += 1.0
prob(y(i)) += 1
} // for
} // if
(count / md, prob /= count) // return the fraction and the probability vector
} // frequency
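    // Worked example (added, not in the original source), using the tennis data from
    // `DecisionTreeC45Test`: for fCol = Humidity and value = 1 (High), 7 of the 14 rows match,
    // 4 labelled No and 3 labelled Yes, so frequency returns approximately
    //   (0.5, (0.571, 0.429))   i.e. (7/14, VectorD (4/7, 3/7))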
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the information gain due to using the values of a feature/attribute
* to distinguish the training cases (e.g., how well does Humidity with its
* values Normal and High indicate whether one will play tennis).
* @param f the feature to consider (e.g., 2 (Humidity))
*/
def gain (f: Int): Double =
{
val fCol = x.col(f) // extract column f from data matrix x
val vals = vc(f) // the number of distinct values for feature f
var sum = 0.0
for (i <- 0 until vals) {
val (coun_fi, prob_fi) = frequency (fCol, i, isCont(f), threshold(f))
val entr_fi = entropy (prob_fi) // entropy for feature f value i
if (DEBUG) println ("\\tentropy from feature " + f + " for value " + i + " is " + entr_fi)
sum += coun_fi * entr_fi
} // for
val igain = entropy_0 - sum // the drop in entropy
//println ("gain from feature " + f + " is " + igain)
igain // return the information gain
} // gain
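    // Worked example (added, not in the original source), assuming base-2 entropy: for the
    // tennis data (9 Yes / 5 No, entropy_0 ≈ 0.940), Humidity splits into High (3 Yes / 4 No,
    // entropy ≈ 0.985) and Normal (6 Yes / 1 No, entropy ≈ 0.592), giving
    //   gain(Humidity) ≈ 0.940 - (0.5 * 0.985 + 0.5 * 0.592) ≈ 0.151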
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Given a continuous feature, adjust its threshold to improve gain.
* @param f the feature index to consider
*/
def calThreshold (f: Int)
{
var thres = 0.0 // start with a threshold of 0
var tmpThres = 0.0 // try other thresholds
var maxGain = -1.0 // maximum gain
var tmpGain = 0.0 // gain with current threshold
var fCol = x.col(f) // feature column
var values = new MutableList [Double] () // values for feature
for (i <- 0 until m if ! values.contains (fCol(i))) values += fCol(i)
values = values.sorted
if (DEBUG) {
println("\\n ************ Threshold calculation for feature = " + f)
println("possible value for feature = " + f + " are: " + values)
} // if
for (i <- 0 until values.length - 1) {
tmpThres = (values(i) + values(i+1)) / 2.0
threshold(f) = tmpThres // tmp change for gain calculation
tmpGain = gain (f) // compute gain with new threshold
if (DEBUG) println ("for threshold " + tmpThres + " the gain is " + tmpGain)
if (tmpGain > maxGain) {
thres = tmpThres // found a better threshold
maxGain = tmpGain // save better gain
} // if
} // for
threshold(f) = thres // save best threshold for this feature
if (DEBUG) println ("for feature "+ f + " threshold = " + thres)
} // calThreshold
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return new x matrix and y array for next step of constructing decision tree.
* @param f the feature index
* @param value one of the features values
*/
def nextXY (f: Int, value: Int): Tuple2 [MatrixI, VectorI] =
{
var count = 0
if (isCont(f)) { // feature with continuous values
if (value == 0) {
for (i <- 0 until m if x(i, f) <= threshold(f)) count += 1
} else {
for (i <- 0 until m if x(i, f) > threshold(f)) count += 1
} // if
} else { // feature with discrete values
for (i <- 0 until m if x(i, f) == value) count += 1
} // if
val nx = new MatrixI (count, n) // new x matrix
val ny = new VectorI (count) // new y array
var x_index = 0
if (isCont(f)) { // feature with continuous values
if (value == 0) {
for (i <- 0 until m if x(i, f) <= threshold(f)) {
ny(x_index) = y(i)
nx(x_index) = x(i)
x_index += 1
} // for
} else {
for (i <- 0 until m if x(i, f) > threshold(f)) {
ny(x_index) = y(i)
nx(x_index) = x(i)
x_index += 1
} // for
} // if
} else { // feature with discrete values
for (i <- 0 until m if x(i, f) == value) {
ny(x_index) = y(i)
nx(x_index) = x(i)
x_index += 1
} // for
} // if
(nx, ny)
} // nextXY
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Train the classifier, i.e., determine which feature provides the most
* information gain and select it as the root of the decision tree.
* @param testStart starting index of test region (inclusive) used in cross-validation.
* @param testEnd ending index of test region (exclusive) used in cross-validation.
*/
def train (testStart: Int, testEnd: Int) // FIX - use these parameters
{
if (DEBUG) {
println ("train: inputs:")
println ("\\t x = " + x)
println ("\\t y = " + y)
println ("\\t vc = " + vc)
println ("\\t isCont = " + isCont.deep)
} // if
for (f <- 0 until n if isCont(f)) calThreshold (f) // set threshold for cont. features
var opt = (0, gain (0)) // compute gain for feature 0
if (DEBUG) println ("train: for feature " + opt._1 + " the gain is " + opt._2)
for (f <- 1 until n) {
val fgain = gain (f) // compute gain for feature f
if (DEBUG) println ("train: for feature " + f + " the gain is " + fgain)
if (fgain > opt._2) opt = (f, fgain) // save feature giving best gain
} // for
if (DEBUG) println ("train: \\noptimal feature is " + opt._1 + " with a gain of " + opt._2)
buildTree (opt)
} // train
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Given the next most distinguishing feature/attribute, extend the
* decision tree.
* @param opt the optimal feature and its gain
*/
def buildTree (opt: Tuple2 [Int, Double])
{
root = if (isCont(opt._1)) new Node (opt._1, -1, threshold(opt._1)) else new Node (opt._1, -1)
for (i <- 0 until vc(opt._1)) {
var next = nextXY (opt._1, i)
var flag = true
for (j <- 0 until next._2.dim-1 if next._2(j) != next._2(j+1)) flag = false
if (flag) {
                root.next += new Node (opt._1, root.next.length, -1, true, next._2(0))   // all classes in this branch are identical, so take the first
if (DEBUG) {
println (" --> Leaf = " + root.next)
println ("\\t x = " + next._1)
println ("\\t y = " + next._2)
println ("\\t vc = " + vc)
println ("\\t isCont = " + isCont.deep)
} // if
} else {
var subtree = new DecisionTreeC45 (next._1, next._2, fn, isCont, k, cn, vc)
subtree.train ()
subtree.root.value = root.next.length
root.next += subtree.root
} // if
        } // for
} // buildTree
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Print out the decision tree using Breadth First Search (BFS).
*/
def printTree
{
println("\\n*********************")
println(" DecisionTree:\\n")
var queue = new Queue [DecisionTreeC45#Node] ()
queue += root
do {
var nd = queue.dequeue
for (i <- 0 until nd.next.length) queue += nd.next(i)
            println (nd + " --> (" + nd.next.length + " )")   // same output for continuous and discrete features
} while ( ! queue.isEmpty)
} // printTree
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Given a data vector z, classify it returning the class number (0, ..., k-1)
* by following a decision path from the root to a leaf.
* @param z the data vector to classify (purely discrete features)
*/
def classify (z: VectorI): Tuple2 [Int, String] =
{
if(DEBUG) println ("classify: purely discrete features\\n")
        breakable {                                           // 'break' must be enclosed in a 'breakable' block
            for (i <- 0 until n if z(i) >= vc(i)) {
                println("classify: the " + i + "th value is too large")
                break
            } // for
        } // breakable
var nd = root // current node
var step = 0
while ( ! nd.leaf) {
println ("classify: step-" + step + ": " + nd)
nd = nd.next(z(nd.f))
step += 1
} // while
println ("classify step-" + step + ": " + nd + "\\n")
val best = nd.decision
(best, cn(best))
} // classify
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Given a data vector z, classify it returning the class number (0, ..., k-1)
* by following a decision path from the root to a leaf.
* @param z the data vector to classify (some continuous features)
*/
override def classify (z: VectorD): Tuple2 [Int, String] =
{
if(DEBUG) println ("classify: some continuous features\\n")
var nd = root // current node
var step = 0
while ( ! nd.leaf) {
println ("classify: step-" + step + ": " + nd)
if (isCont (nd.f)) {
nd = if (z(nd.f) <= nd.threshold) nd.next(0) else nd.next(1)
} else {
nd = nd.next(z(nd.f).toInt)
} // if
step += 1
} // while
println ("classify step-" + step + ": " + nd + "\\n")
val best = nd.decision
(best, cn(best))
} // classify
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Reset or re-initialize the frequency tables and the probability tables.
*/
def reset ()
{
// FIX: to be implemented
} // reset
} // DecisionTreeC45 class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** `DecisionTreeC45` is the companion object for the `DecisionTreeC45` class.
*/
object DecisionTreeC45
{
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Create a `DecisionTreeC45` object, passing 'x' and 'y' together in one table.
* @param xy the data vectors along with their classifications stored as rows of a matrix
* @param fn the names for all features/variables
* @param isCont boolean value to indicate whether according feature is continuous
* @param k the number of classes
* @param cn the names for all classes
* @param vc the value count array indicating number of distinct values per feature
*/
def apply (xy: MatrixI, fn: Array [String], isCont: Array [Boolean], k: Int, cn: Array [String],
vc: VectorI = null) =
{
new DecisionTreeC45 (xy(0 until xy.dim1, 0 until xy.dim2-1), xy.col(xy.dim2-1), fn, isCont, k, cn, vc)
} // apply
} // DecisionTreeC45 object
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `DecisionTreeC45Test` object is used to test the `DecisionTreeC45` class.
* Ex: Classify (No/Yes) whether a person will play tennis based on the measured
* features.
* @see http://www.cise.ufl.edu/~ddd/cap6635/Fall-97/Short-papers/2.htm
*/
object DecisionTreeC45Test extends App
{
// training-set -----------------------------------------------------------
// Outlook: Rain (0), Overcast (1), Sunny (2)
// Temperature: Cold (0), Mild (1), Hot (2)
// Humidity: Normal (0), High (1)
// Wind: Weak (0), Strong (1)
// features: Outlook Temp Humidity Wind
val x = new MatrixI ((14, 4), 2, 2, 1, 0, // day 1 - data matrix
2, 2, 1, 1, // day 2
1, 2, 1, 0, // day 3
0, 1, 1, 0, // day 4
0, 0, 0, 0, // day 5
0, 0, 0, 1, // day 6
1, 0, 0, 1, // day 7
2, 1, 1, 0, // day 8
2, 0, 0, 0, // day 9
0, 1, 0, 0, // day 10
2, 1, 0, 1, // day 11
1, 1, 1, 1, // day 12
1, 2, 0, 0, // day 13
0, 1, 1, 1) // day 14
// day: 1 2 3 4 5 6 7 8 9 10 11 12 13 14
val y = VectorI (0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0) // classification vector: 0(No), 1(Yes))
val vc = VectorI (3, 3, 2, 2) // distinct values for each feature
val fn = Array ("Outlook", "Temp", "Humidity", "Wind") // feature names
val flag = Array (false, false, false, false) // is continuous
// val x = new MatrixI ((14, 4), 0, 85, 85, 0,
// 0, 80, 90, 1,
// 1, 83, 78, 0,
// 2, 70, 96, 0,
// 2, 68, 80, 0,
// 2, 65, 70, 1,
// 1, 64, 65, 1,
// 0, 72, 95, 0,
// 0, 69, 70, 0,
// 2, 75, 80, 0,
// 0, 75, 70, 1,
// 1, 72, 90, 1,
// 1, 81, 75, 0,
// 2, 71, 80, 1)
//
// val y = VectorI (0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0) // classification vector: 0(No), 1(Yes))
// val vc = VectorI (3, 3, 2, 2) // distinct values for each feature
// val flag = Array (false, true, true, false)
// train the classifier ---------------------------------------------------
val cl = new DecisionTreeC45 (x, y, fn, flag, 2, null, vc) // create the classifier
cl.train ()
cl.printTree
// test sample ------------------------------------------------------------
// val z = VectorD (2, 100, 77.5, 0) // new data vector to classify
// println ("--- classify " + z + " = " + cl.classify (z) + "\\n")
// val z2 = VectorD (2, 100, 77.6, 1) // new data vector to classify
// println ("--- classify " + z2 + " = " + cl.classify (z2) + "\\n")
} // DecisionTreeC45Test object
|
mvnural/scalation
|
src/main/scala/scalation/analytics/DecisionTreeC45.scala
|
Scala
|
mit
| 21,662 |
import Allergen.Allergen
class Allergies {
private lazy val allergenList = Allergen.values
def isAllergicTo(allergen: Allergen, score: Int): Boolean =
(allergen.id & score) != 0
def allergies(score: Int): List[Allergen] =
allergenList.filter(a => isAllergicTo(a, score)).toList
}
object Allergies {
def apply() = new Allergies
}
object Allergen extends Enumeration {
type Allergen = Value
val Eggs = Value(1, "Eggs")
val Peanuts = Value(2, "Peanuts")
val Shellfish = Value(4, "Shellfish")
val Strawberries = Value(8, "Strawberries")
val Tomatoes = Value(16, "Tomatoes")
val Chocolate = Value(32, "Chocolate")
val Pollen = Value(64, "Pollen")
val Cats = Value(128, "Cats")
}
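// Usage sketch (added for illustration, not part of the original file): a score is a bitmask
// of Allergen ids, e.g. 34 = 2 + 32, so
//   Allergies().allergies(34) == List(Allergen.Peanuts, Allergen.Chocolate)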
|
nlochschmidt/xscala
|
allergies/example.scala
|
Scala
|
mit
| 713 |
package ch.epfl.scala.index
package server
package routes
import akka.http.scaladsl.server.Directives._
object Assets {
val routes =
get {
path("assets" / "lib" / Remaining) { path ⇒
getFromResource("lib/" + path)
} ~
path("assets" / "img" / Remaining) { path ⇒
getFromResource("img/" + path)
} ~
path("assets" / "css" / Remaining) { path ⇒
getFromResource("css/" + path)
} ~
path("assets" / "js" / Remaining) { path ⇒
getFromResource("js/" + path)
} ~
path("assets" / "client-opt.js") {
getFromResource("client-opt.js")
} ~
path("assets" / "client-fastopt.js") {
getFromResource("client-fastopt.js")
} ~
path("assets" / "client-opt.js.map") {
getFromResource("client-opt.js.map")
} ~
path("assets" / "client-fastopt.js.map") {
getFromResource("client-fastopt.js.map")
} ~
path("assets" / "client-jsdeps.js") {
getFromResource("client-jsdeps.js")
}
}
}
|
adamwy/scaladex
|
server/src/main/scala/ch.epfl.scala.index.server/routes/Assets.scala
|
Scala
|
bsd-3-clause
| 1,101 |
package com.netscout.aion2.split
import com.netscout.aion2.SplitStrategy
import com.netscout.aion2.except.IllegalQueryException
import com.netscout.aion2.model.{QueryStrategy, EmptyQueryStrategy}
import java.time.{Duration, Instant}
import java.util.Date
import javax.ws.rs.core.MultivaluedMap
import scala.concurrent.duration.FiniteDuration
class DurationSplitStrategy(maybeCfg: Option[Map[String, String]]) extends SplitStrategy {
import java.util.UUID
import scala.language.implicitConversions
val maybeDuration = for {
cfg <- maybeCfg
durationStr <- cfg.get("duration")
d <- Some(Duration.parse(durationStr))
} yield d
val duration = maybeDuration match {
case Some(d) => d
case None => throw new Exception("duration must be supplied as configuration for DurationSplitStrategy")
}
private def roundInstant(i: Instant) = {
val durTime = duration.getSeconds
val s = i.getEpochSecond
val roundedS = s - (s % durTime)
Instant.EPOCH.plusSeconds(roundedS)
}
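  // Worked example (added, not in the original source): with duration = PT1H, an instant of
  // 2016-01-01T10:47:30Z is 2850 seconds past the hour boundary, so roundInstant returns
  // 2016-01-01T10:00:00Z.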
implicit def instantToDate(i: Instant) = Date.from(i)
implicit def dateToInstant(d: Date) = d.toInstant
implicit def uuidToInstant(uuid: UUID): Instant = {
import com.datastax.driver.core.utils.UUIDs
new Date(UUIDs.unixTimestamp(uuid))
}
class InstantRange (
val start: Instant,
val end: Instant
) extends Iterable[Instant] {
override def iterator = {
new Iterator[Instant] {
var current = start.minus(duration)
override def hasNext = {
val possibleNext = current.plus(duration)
possibleNext.isBefore(end) || possibleNext.equals(end)
}
override def next = {
if (hasNext) {
current = current.plus(duration)
current
} else {
null
}
}
}
}
}
class RangeQueryStrategy (
val fromDate: Instant,
val toDate: Instant
) extends QueryStrategy {
override def minimum: Date = fromDate
override def maximum: Date = toDate
private def durTime = duration.getSeconds
private def minRow = roundInstant(fromDate)
private def maxRow = roundInstant(toDate)
override def fullRows = {
import java.time.temporal.ChronoUnit._
if (fromDate.until(toDate, SECONDS) < durTime) {
None
} else {
val min = minRow.plus(duration)
val max = maxRow.minus(duration)
if (max.isBefore(min)) {
None
} else {
Some(new InstantRange(min, max).map(i => instantToDate(i)))
}
}
}
override def partialRows = {
val minDate: Date = minRow
val maxDate: Date = maxRow
if (minRow.equals(maxRow)) {
Seq(minDate)
} else {
Seq(minDate, maxDate)
}
}
}
override def rowKey(obj: Object) = {
val inputInstant: Instant = obj match {
case x: Instant => x
case x: Date => x
case uuid: UUID => uuid
case _ => throw new IllegalQueryException(s"Value of type ${obj.getClass.getName} cannot be used as a value for DurationSplitStrategy")
}
val outputDate: Date = roundInstant(inputInstant)
outputDate
}
/**
* Convenience method to parse an Instant from a query parameter string
*
* Supports "now" strings, or ISO8601 formatted strings
*
* @see java.time.Instant.parse
* @param str the query parameter string to parse
* @return a java.time.Instant represented by str
*/
def parseInstant(str: String) = str match {
case "now" => Instant.now()
case _ => Instant.parse(str)
}
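  // Illustrative (added, not in the original source): parseInstant("now") returns the current
  // instant, while parseInstant("2016-01-01T00:00:00Z") parses the ISO-8601 timestamp.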
/**
* Implicit extensions to MultivaluedMap for retreiving query
* parameters that should have exactly one value
*/
implicit class MultivaluedMapExtensions[K, V](
val map: MultivaluedMap[K, V]
) {
/**
* Gets a parameter that should have only one value
*
* @param key the key to look up in the map
* @return (optionally) the first value in the map for the desired key
*/
def getSingleValue(key: String) = for {
values <- Option(map.get(key))
value <- Option(values.get(0))
} yield value
}
override def strategyForQuery(params: MultivaluedMap[String, String]): QueryStrategy = {
// Gets a single-valued param from the multivaluedmap of parameters
    def getMandatoryParam(paramName: String) = params.getSingleValue(paramName).getOrElse(throw new IllegalQueryException(s"\\'${paramName}\\' parameter must be supplied"))
    val parseParam = parseInstant _ compose getMandatoryParam _
var fromDate: Instant = null
var toDate: Instant = null
try {
fromDate = parseParam("from")
toDate = parseParam("to")
if (fromDate == null || toDate == null) {
throw new Exception("Both \\'from\\' and \\'to\\' must parse to non-null dates")
}
} catch {
case (e: Exception) => {
throw new IllegalQueryException("", e) // TODO: investigate why this was ever needed
}
}
// If the dates are equal, we get to terminate early, because no data will be queried
if (toDate.equals(fromDate)) {
return EmptyQueryStrategy
}
// If the ordering of the dates is messed up, the query is impossible to perform
if (toDate.isBefore(fromDate)) {
throw new IllegalQueryException("\\'from\\' date must be before \\'to\\' date", null)
}
new RangeQueryStrategy(fromDate, toDate)
}
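  // Illustrative behaviour (added, not in the original source): a request such as
  // "?from=2016-01-01T00:00:00Z&to=now" yields a RangeQueryStrategy over that interval;
  // "from" == "to" short-circuits to EmptyQueryStrategy; "to" before "from", or a missing
  // parameter, raises IllegalQueryException.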
}
|
FlukeNetworks/aion
|
src/main/scala/com/netscout/aion2/split/DurationSplitStrategy.scala
|
Scala
|
apache-2.0
| 5,410 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.util
object Shard {
private[this] def sumFromZero(total: Long, buckets: Int, bucketNumber: Int): Long =
if (bucketNumber == -1) {
0L
} else if (bucketNumber == buckets - 1) {
// because of rounding, we might be one off on last bucket
total
} else {
// +1 is because we want a non zero value in first bucket
math.ceil(total.toDouble / buckets * (bucketNumber + 1)).toLong
}
def shard(total: Long, bucketNumber: Int, buckets: Int): Shard = {
val offset = sumFromZero(total, buckets, bucketNumber - 1)
val value = sumFromZero(total, buckets, bucketNumber) - offset
Shard(offset.toInt, value.toInt)
}
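  // Worked example (added, not in the original source): distributing 5 users over 3 buckets
  // gives bucket sizes 2, 2 and 1, so
  //   shard(5, 0, 3) == Shard(0, 2)
  //   shard(5, 1, 3) == Shard(2, 2)
  //   shard(5, 2, 3) == Shard(4, 1)
  // and shards(5, 3).toList == List(2L, 2L, 1L)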
def shards(total: Long, buckets: Int): Iterator[Long] =
new Iterator[Long] {
private[this] var currentIndex = 0
private[this] var previousSumFromZero = 0L
override def hasNext: Boolean = currentIndex < buckets
override def next(): Long = {
val newSumFromZero = sumFromZero(total, buckets, currentIndex)
val res = newSumFromZero - previousSumFromZero
currentIndex += 1
previousSumFromZero = newSumFromZero
res
}
}
}
case class Shard(offset: Int, length: Int)
|
pwielgolaski/gatling
|
gatling-core/src/main/scala/io/gatling/core/util/Shard.scala
|
Scala
|
apache-2.0
| 1,848 |
package scala_pastebin
import java.io.File
case class CommandLineConfig(
api_dev_key: String = ApiDevKey(),
var api_paste_code: String = "",
api_paste_name: String = "",
api_paste_format: String = "java",
api_paste_expire_date: String = "10M",
api_option: String = "paste",
file: String = null)
|
humbhenri/scala_pastebin
|
src/main/scala/scala_pastebin/CommandLineConfig.scala
|
Scala
|
unlicense
| 310 |
/*
* # Trove
*
* This file is part of Trove - A FREE desktop budgeting application that
* helps you track your finances, FREES you from complex budgeting, and
* enables you to build your TROVE of savings!
*
* Copyright © 2016-2021 Eric John Fredericks.
*
* Trove is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Trove is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Trove. If not, see <http://www.gnu.org/licenses/>.
*/
package trove.ui
import grizzled.slf4j.Logging
import javafx.application.{Application => JFXApplication}
import scalafx.Includes._
import scalafx.application.JFXApp.PrimaryStage
import scalafx.application.{JFXApp, Platform}
import scalafx.scene.control.Alert.AlertType
import trove.constants.ApplicationName
import trove.core.Trove
import trove.core.infrastructure.event.Event
import trove.events.ProjectChanged
import trove.ui.ButtonTypes._
import trove.ui.fxext.AppModalAlert
// Linux workaround using Java 12 / JavaFx 13.
// May be fixed in Java 13 / JavaFX 13.
//https://stackoverflow.com/questions/57425854/javafx-window-opens-in-top-left-corner-then-jumps-to-center
//-Djdk.gtk.version=2
private[ui] object Main extends JFXApp with Logging {
System.setProperty("prism.lcdtext", "true")
JFXApplication.setUserAgentStylesheet(JFXApplication.STYLESHEET_MODENA)
Platform.implicitExit = false
promptUserWithError(Trove.startup()).recover { case _ => shutdown()}
stage = new PrimaryStage with UIEventListener {
title = ApplicationName
setWelcomeScene()
icons += ApplicationIconImage64
onCloseRequest = ev => {
logger.debug("user close requested")
if (conditionallyQuit()) {
logger.debug("Close request confirmed")
}
else {
ev.consume()
}
}
// We will use 0 for the "global" / application-level event subscriber group.
// All other listeners should use one specific to the current project.
override val eventSubscriberGroup = 0
def onReceive: PartialFunction[Event, Unit] = {
case ProjectChanged(maybeProject) => maybeProject.fold[Unit](setWelcomeScene()){ prj =>
hide()
maximized = true
title = s"$ApplicationName [ ${prj.name} ]"
scene = new ActiveProjectScene(43, prj) // ejf-fixMe: subscriber group!!
show()
}
}
private[this] def setWelcomeScene(): Unit = {
title = ApplicationName
maximized = false
height = 600
width = 800
centerOnScreen()
scene = new WelcomeScene
}
}
def conditionallyQuit(): Boolean = {
if(confirmQuitWithUser()) {
shutdown()
true
}
else {
false
}
}
def showHelpAboutDialog(): Unit = {
logger.debug("showHelpAboutDialog called")
new HelpAboutDialog().showAndWait()
}
private[this] def confirmQuitWithUser(): Boolean = {
logger.debug("showQuitDialog called")
val result = new AppModalAlert(AlertType.Confirmation) {
headerText = "Exit Trove?"
buttonTypes = Seq(Yes,No)
contentText = "Are you sure you want to exit Trove?"
}.showAndWait()
result.map(bt => if(bt == Yes) true else false).fold(false)(identity)
}
def shutdown(): Unit = {
promptUserWithError(Trove.shutdown())
logger.debug("Application closing")
Platform.exit()
}
}
|
emanchgo/trove
|
src/main/scala/trove/ui/Main.scala
|
Scala
|
gpl-3.0
| 3,777 |
package lang.lightweightjava.ast.statement
import lang.lightweightjava.ast._
import name.namegraph.NameGraphExtended
import name.{Name, Renaming}
case class StatementBlock(blockBody: Statement*) extends Statement {
override def allNames = blockBody.foldLeft(Set[Name]())(_ ++ _.allNames)
override def rename(renaming: Renaming) = StatementBlock(blockBody.map(_.rename(renaming)): _*)
override def typeCheckForTypeEnvironment(program: Program, typeEnvironment: TypeEnvironment) = {
blockBody.foldLeft(typeEnvironment)((oldEnvironment, statement) => statement.typeCheckForTypeEnvironment(program, oldEnvironment))
typeEnvironment
}
// Aggregate the final name graph of the block by resolving each statement with the name environment after the previous ones are evaluated
override def resolveNames(nameEnvironment: ClassNameEnvironment, methodEnvironment: VariableNameEnvironment, typeEnvironment : TypeEnvironment) =
(blockBody.foldLeft((NameGraphExtended(Set(), Map()), (methodEnvironment, typeEnvironment)))((result, statement) => {
val statementResult = statement.resolveNames(nameEnvironment, methodEnvironment, typeEnvironment)
(result._1 + statementResult._1, statementResult._2)
})._1, (methodEnvironment, typeEnvironment))
override def toString(preTabs: String) = {
val innerPreTabs = preTabs + "\\t"
"{\\n" + blockBody.foldLeft("")(_ + innerPreTabs + _.toString(innerPreTabs) + "\\n") + preTabs + "}"
}
override def toString = {
"{\\n\\t" + blockBody.foldLeft("")(_ + "\\t" + _.toString("\\t") + "\\n") + "}"
}
}
|
matthisk/hygienic-transformations
|
scala/src/main/scala/lang/lightweightjava/ast/statement/StatementBlock.scala
|
Scala
|
lgpl-3.0
| 1,579 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import org.apache.spark.unsafe.types.UTF8String
/**
* This holds file names of the current Spark task. This is used in HadoopRDD,
* FileScanRDD, NewHadoopRDD and InputFileName function in Spark SQL.
*/
private[spark] object InputFileNameHolder {
/**
* The thread variable for the name of the current file being read. This is used by
* the InputFileName function in Spark SQL.
*/
private[this] val inputFileName: ThreadLocal[UTF8String] = new ThreadLocal[UTF8String] {
override protected def initialValue(): UTF8String = UTF8String.fromString("")
}
def getInputFileName(): UTF8String = inputFileName.get()
private[spark] def setInputFileName(file: String) = inputFileName.set(UTF8String.fromString(file))
private[spark] def unsetInputFileName(): Unit = inputFileName.remove()
}
|
gioenn/xSpark
|
core/src/main/scala/org/apache/spark/rdd/InputFileNameHolder.scala
|
Scala
|
apache-2.0
| 1,642 |
package org.scalafmt
import org.scalafmt.config.ScalafmtConfig
import org.scalafmt.util.DiffAssertions
import org.scalatest.FunSuite
class RangeTest extends FunSuite with DiffAssertions {
test("range preserves indent") {
val original = """object a {
|val x = 1
|val y = 2
|}
""".stripMargin
val expected = """object a {
|val x = 1
| val y = 2
|}
""".stripMargin
val obtained = Scalafmt
.format(
original,
ScalafmtConfig.unitTest40,
range = Set(Range(2, 2).inclusive)
)
.get
assertNoDiff(obtained, expected)
}
}
|
olafurpg/scalafmt
|
scalafmt-tests/src/test/scala/org/scalafmt/RangeTest.scala
|
Scala
|
apache-2.0
| 714 |
package chrome.tabs.bindings
import chrome.windows.bindings.Window
import scala.scalajs.js
class DetachInfo extends js.Object {
def oldWindowId: Window.Id = js.native
def oldPosition: Int = js.native
}
|
amsayk/scala-js-chrome
|
bindings/src/main/scala/chrome/tabs/bindings/DetachInfo.scala
|
Scala
|
mit
| 213 |
package scalan
import java.io.File
import scalan.compilation.{GraphVizExport, GraphVizConfig}
/**
* Base trait for testing specific rewrite rules
*/
trait RewriteRuleSuite[A] extends BaseShouldTests {
lazy val folder = new File(prefix, suiteName)
def getCtx: TestCtx
trait TestCtx extends ScalanDslExp {
def testLemma: RRewrite[A]
def testExpr(): Exp[A]
def expected: Exp[A]
lazy val rule = patternRewriteRule(testLemma)
}
"ScalanCtx" should "stage Lemma" in {
val ctx = getCtx
ctx.emitDepGraph(ctx.testLemma, new File(folder, "testLemma.dot"))(GraphVizConfig.default)
}
it should "create LemmaRule" in {
val ctx = getCtx
import ctx._
ctx.emitDepGraph(List(testLemma, rule.lhs, rule.rhs), new File(folder, "testRule.dot"))(GraphVizConfig.default)
}
it should "create ProjectionTree in pattern" in {
val ctx = getCtx
import ctx._
ctx.emitDepGraph(List(rule.lhs, rule.rhs), new File(folder, "testPatternAndRhs.dot"))(GraphVizConfig.default)
}
it should "recognize pattern" in {
val ctx = getCtx
import ctx._
patternMatch(rule.lhs, testExpr()) match {
case Some(subst) =>
subst should not be(Map.empty)
case _ =>
fail("should recognize pattern")
}
}
it should "apply pattern" in {
val ctx = getCtx
import ctx._
val test = testExpr()
val rewritten = rule(test)
rewritten match {
case Some(res) =>
ctx.emitDepGraph(List(Pair(test, res)), new File(folder, "LemmaRule/originalAndRewritten.dot"))(GraphVizConfig.default)
case _ =>
fail("should apply pattern")
}
}
it should "rewrite when registered" in {
val ctx = getCtx
import ctx._
val withoutRule = testExpr()
addRewriteRules(rule)
val withRule = testExpr()
removeRewriteRules(rule)
ctx.emitDepGraph(List(withoutRule, withRule), new File(folder, "LemmaRule/ruleRewriting.dot"))(GraphVizConfig.default)
val expectedResult = expected
alphaEqual(withRule, expectedResult) should be(true)
alphaEqual(withoutRule, expectedResult) should be(false)
val afterRemoval = testExpr()
ctx.emitDepGraph(List(withoutRule, withRule, afterRemoval), new File(folder, "LemmaRule/ruleRewriting.dot"))(GraphVizConfig.default)
alphaEqual(afterRemoval, withoutRule) should be(true)
}
}
|
PCMNN/scalan-ce
|
core/src/test/scala/scalan/RewriteRuleSuite.scala
|
Scala
|
apache-2.0
| 2,355 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.evaluation
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.param.ParamsSuite
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTestingUtils}
import org.apache.spark.mllib.util.MLlibTestSparkContext
class BinaryClassificationEvaluatorSuite
extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {
import testImplicits._
test("params") {
ParamsSuite.checkParams(new BinaryClassificationEvaluator)
}
test("read/write") {
val evaluator = new BinaryClassificationEvaluator()
.setRawPredictionCol("myRawPrediction")
.setLabelCol("myLabel")
.setMetricName("areaUnderPR")
testDefaultReadWrite(evaluator)
}
test("should accept both vector and double raw prediction col") {
val evaluator = new BinaryClassificationEvaluator()
.setMetricName("areaUnderPR")
val vectorDF = Seq(
(0.0, Vectors.dense(12, 2.5)),
(1.0, Vectors.dense(1, 3)),
(0.0, Vectors.dense(10, 2))
).toDF("label", "rawPrediction")
assert(evaluator.evaluate(vectorDF) === 1.0)
val doubleDF = Seq(
(0.0, 0.0),
(1.0, 1.0),
(0.0, 0.0)
).toDF("label", "rawPrediction")
assert(evaluator.evaluate(doubleDF) === 1.0)
val stringDF = Seq(
(0.0, "0.0"),
(1.0, "1.0"),
(0.0, "0.0")
).toDF("label", "rawPrediction")
val thrown = intercept[IllegalArgumentException] {
evaluator.evaluate(stringDF)
}
assert(thrown.getMessage.replace("\\n", "") contains "Column rawPrediction must be of type " +
"equal to one of the following types: [double, ")
assert(thrown.getMessage.replace("\\n", "") contains "but was actually of type string.")
}
test("should accept weight column") {
val weightCol = "weight"
// get metric with weight column
val evaluator = new BinaryClassificationEvaluator()
.setMetricName("areaUnderROC").setWeightCol(weightCol)
val vectorDF = Seq(
(0.0, Vectors.dense(2.5, 12), 1.0),
(1.0, Vectors.dense(1, 3), 1.0),
(0.0, Vectors.dense(10, 2), 1.0)
).toDF("label", "rawPrediction", weightCol)
val result = evaluator.evaluate(vectorDF)
// without weight column
val evaluator2 = new BinaryClassificationEvaluator()
.setMetricName("areaUnderROC")
val result2 = evaluator2.evaluate(vectorDF)
assert(result === result2)
// use different weights, validate metrics change
val vectorDF2 = Seq(
(0.0, Vectors.dense(2.5, 12), 2.5),
(1.0, Vectors.dense(1, 3), 0.1),
(0.0, Vectors.dense(10, 2), 2.0)
).toDF("label", "rawPrediction", weightCol)
val result3 = evaluator.evaluate(vectorDF2)
// Since wrong result weighted more heavily, expect the score to be lower
assert(result3 < result)
}
test("should support all NumericType labels and not support other types") {
val evaluator = new BinaryClassificationEvaluator().setRawPredictionCol("prediction")
MLTestingUtils.checkNumericTypes(evaluator, spark)
}
}
|
bdrillard/spark
|
mllib/src/test/scala/org/apache/spark/ml/evaluation/BinaryClassificationEvaluatorSuite.scala
|
Scala
|
apache-2.0
| 3,869 |
package week5
import math.Ordering
object Merge {
def msort[T](xs: List[T])(implicit ord: Ordering[T]): List[T] = {
val n = xs.length / 2
if (n == 0) xs
else {
def merge(xs: List[T], ys: List[T]): List[T] = (xs, ys) match {
case (Nil, ys) => ys
case (xs, Nil) => xs
case (x :: xs1, y :: ys1) =>
if (ord.lt(x, y)) x :: merge(xs1, ys)
else y :: merge(xs, ys1)
}
val (fst, snd) = xs splitAt n
merge(msort(fst), msort(snd))
}
}
val nums = List(2, -4, 5, 7, 1)
val fruits = List("apples", "pineapple", "orange", "banana")
msort(nums)
msort(fruits)
}
|
M4573R/playground-notes
|
functional-programming-principles-in-scala/week5/Merge.scala
|
Scala
|
mit
| 741 |
/*
* Copyright 2015-2016 IBM Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.containerpool
import java.time.Instant
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.Success
import scala.util.Failure
import akka.actor.FSM
import akka.actor.Props
import akka.actor.Stash
import akka.actor.Status.{ Failure => FailureMessage }
import akka.pattern.pipe
import spray.json._
import spray.json.DefaultJsonProtocol._
import whisk.common.TransactionId
import whisk.core.connector.ActivationMessage
import whisk.core.container.Interval
import whisk.core.entity._
import whisk.core.entity.size._
import whisk.common.Counter
import whisk.core.entity.ExecManifest.ImageName
// States
sealed trait ContainerState
case object Uninitialized extends ContainerState
case object Starting extends ContainerState
case object Started extends ContainerState
case object Running extends ContainerState
case object Ready extends ContainerState
case object Pausing extends ContainerState
case object Paused extends ContainerState
case object Removing extends ContainerState
// Data
sealed abstract class ContainerData(val lastUsed: Instant)
case class NoData() extends ContainerData(Instant.EPOCH)
case class PreWarmedData(container: Container, kind: String, memoryLimit: ByteSize) extends ContainerData(Instant.EPOCH)
case class WarmedData(container: Container, invocationNamespace: EntityName, action: ExecutableWhiskAction, override val lastUsed: Instant) extends ContainerData(lastUsed)
// Events received by the actor
case class Start(exec: CodeExec[_], memoryLimit: ByteSize)
case class Run(action: ExecutableWhiskAction, msg: ActivationMessage)
case object Remove
// Events sent by the actor
case class NeedWork(data: ContainerData)
case object ContainerPaused
case object ContainerRemoved
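// Illustrative lifecycle sketch (added for this write-up, not part of the original file),
// tracing a typical warm-reuse cycle through the FSM below; "pool" stands for the parent actor:
//   pool ! Start(exec, memoryLimit)   // prewarm: proxy answers NeedWork(PreWarmedData)
//   pool ! Run(action, msg)           // proxy runs the job, answers NeedWork(WarmedData)
//   (pauseGrace elapses)              // proxy suspends the container, answers NeedWork again
//   pool ! Remove                     // proxy destroys the container, answers ContainerRemoved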
/**
* A proxy that wraps a Container. It is used to keep track of the lifecycle
* of a container and to guarantee a contract between the client of the container
* and the container itself.
*
* The contract is as follows:
* 1. Only one job is to be sent to the ContainerProxy at one time. ContainerProxy
* will delay all further jobs until a previous job has finished.
* 2. The next job can be sent to the ContainerProxy after it indicates available
* capacity by sending NeedWork to its parent.
* 3. A Remove message can be sent at any point in time. Like multiple jobs though,
* it will be delayed until the currently running job finishes.
*
* @constructor
* @param factory a function generating a Container
* @param sendActiveAck a function sending the activation via active ack
* @param storeActivation a function storing the activation in a persistent store
*/
class ContainerProxy(
factory: (TransactionId, String, ImageName, Boolean, ByteSize) => Future[Container],
sendActiveAck: (TransactionId, WhiskActivation) => Future[Any],
storeActivation: (TransactionId, WhiskActivation) => Future[Any]) extends FSM[ContainerState, ContainerData] with Stash {
implicit val ec = context.system.dispatcher
// The container is destroyed after this period of time
val unusedTimeout = 10.minutes
// The container is not paused for this period of time
// after an activation has finished successfully
val pauseGrace = 1.second
startWith(Uninitialized, NoData())
when(Uninitialized) {
// pre warm a container
case Event(job: Start, _) =>
factory(
TransactionId.invokerWarmup,
ContainerProxy.containerName("prewarm", job.exec.kind),
job.exec.image,
job.exec.pull,
job.memoryLimit)
.map(container => PreWarmedData(container, job.exec.kind, job.memoryLimit))
.pipeTo(self)
goto(Starting)
// cold start
case Event(job: Run, _) =>
implicit val transid = job.msg.transid
factory(
job.msg.transid,
ContainerProxy.containerName(job.msg.user.namespace.name, job.action.name.name),
job.action.exec.image,
job.action.exec.pull,
job.action.limits.memory.megabytes.MB)
.andThen {
case Success(container) => self ! PreWarmedData(container, job.action.exec.kind, job.action.limits.memory.megabytes.MB)
case Failure(t) =>
val response = t match {
case WhiskContainerStartupError(msg) => ActivationResponse.whiskError(msg)
case BlackboxStartupError(msg) => ActivationResponse.applicationError(msg)
case _ => ActivationResponse.whiskError(t.getMessage)
}
val activation = ContainerProxy.constructWhiskActivation(job, Interval.zero, response)
sendActiveAck(transid, activation)
storeActivation(transid, activation)
}
.flatMap {
container =>
initializeAndRun(container, job)
.map(_ => WarmedData(container, job.msg.user.namespace, job.action, Instant.now))
}.pipeTo(self)
goto(Running)
}
when(Starting) {
// container was successfully obtained
case Event(data: PreWarmedData, _) =>
context.parent ! NeedWork(data)
goto(Started) using data
// container creation failed
case Event(_: FailureMessage, _) =>
context.parent ! ContainerRemoved
stop()
case _ => delay
}
when(Started) {
case Event(job: Run, data: PreWarmedData) =>
implicit val transid = job.msg.transid
initializeAndRun(data.container, job)
.map(_ => WarmedData(data.container, job.msg.user.namespace, job.action, Instant.now))
.pipeTo(self)
goto(Running)
case Event(Remove, data: PreWarmedData) => destroyContainer(data.container)
}
when(Running) {
// Intermediate state, we were able to start a container
// and we keep it in case we need to destroy it.
case Event(data: PreWarmedData, _) => stay using data
// Run was successful
case Event(data: WarmedData, _) =>
context.parent ! NeedWork(data)
goto(Ready) using data
// Failed after /init (the first run failed)
case Event(_: FailureMessage, data: PreWarmedData) => destroyContainer(data.container)
// Failed for a subsequent /run
case Event(_: FailureMessage, data: WarmedData) => destroyContainer(data.container)
// Failed at getting a container for a cold-start run
case Event(_: FailureMessage, _) =>
context.parent ! ContainerRemoved
stop()
case _ => delay
}
when(Ready, stateTimeout = pauseGrace) {
case Event(job: Run, data: WarmedData) =>
implicit val transid = job.msg.transid
initializeAndRun(data.container, job)
.map(_ => WarmedData(data.container, job.msg.user.namespace, job.action, Instant.now))
.pipeTo(self)
goto(Running)
// pause grace timed out
case Event(StateTimeout, data: WarmedData) =>
data.container.suspend()(TransactionId.invokerNanny).map(_ => ContainerPaused).pipeTo(self)
goto(Pausing)
case Event(Remove, data: WarmedData) => destroyContainer(data.container)
}
when(Pausing) {
case Event(ContainerPaused, data: WarmedData) =>
context.parent ! NeedWork(data)
goto(Paused)
case Event(_: FailureMessage, data: WarmedData) => destroyContainer(data.container)
case _ => delay
}
when(Paused, stateTimeout = unusedTimeout) {
case Event(job: Run, data: WarmedData) =>
implicit val transid = job.msg.transid
data.container.resume().andThen {
// Sending the message to self on a failure will cause the message
// to ultimately be sent back to the parent (which will retry it)
// when container removal is done.
case Failure(_) => self ! job
}.flatMap(_ => initializeAndRun(data.container, job))
.map(_ => WarmedData(data.container, job.msg.user.namespace, job.action, Instant.now))
.pipeTo(self)
goto(Running)
// timeout or removing
case Event(StateTimeout | Remove, data: WarmedData) => destroyContainer(data.container)
}
when(Removing) {
case Event(job: Run, _) =>
// Send the job back to the pool to be rescheduled
context.parent ! job
stay
case Event(ContainerRemoved, _) => stop()
case Event(_: FailureMessage, _) => stop()
}
// Unstash all messages stashed while in intermediate state
onTransition {
case _ -> Started => unstashAll()
case _ -> Ready => unstashAll()
case _ -> Paused => unstashAll()
case _ -> Removing => unstashAll()
}
initialize()
/** Delays all incoming messages until unstashAll() is called */
def delay = {
stash()
stay
}
/**
* Destroys the container after unpausing it if needed. Can be used
* as a state progression as it goes to Removing.
*
* @param container the container to destroy
*/
def destroyContainer(container: Container) = {
context.parent ! ContainerRemoved
val unpause = stateName match {
case Paused => container.resume()(TransactionId.invokerNanny)
case _ => Future.successful(())
}
unpause
.flatMap(_ => container.destroy()(TransactionId.invokerNanny))
.map(_ => ContainerRemoved).pipeTo(self)
goto(Removing)
}
/**
* Runs the job, initialize first if necessary.
*
* @param container the container to run the job on
* @param job the job to run
* @return a future completing after logs have been collected and
* added to the WhiskActivation
*/
def initializeAndRun(container: Container, job: Run)(implicit tid: TransactionId): Future[WhiskActivation] = {
val actionTimeout = job.action.limits.timeout.duration
// Only initialize iff we haven't yet warmed the container
val initialize = stateData match {
case data: WarmedData => Future.successful(Interval.zero)
case _ => container.initialize(job.action.containerInitializer, actionTimeout)
}
val activation: Future[WhiskActivation] = initialize.flatMap { initInterval =>
val parameters = job.msg.content getOrElse JsObject()
val environment = JsObject(
"api_key" -> job.msg.user.authkey.compact.toJson,
"namespace" -> job.msg.user.namespace.toJson,
"action_name" -> job.msg.action.qualifiedNameWithLeadingSlash.toJson,
"activation_id" -> job.msg.activationId.toString.toJson,
// compute deadline on invoker side avoids discrepancies inside container
// but potentially under-estimates actual deadline
"deadline" -> (Instant.now.toEpochMilli + actionTimeout.toMillis).toString.toJson)
container.run(parameters, environment, actionTimeout)(job.msg.transid).map {
case (runInterval, response) =>
val initRunInterval = Interval(runInterval.start.minusMillis(initInterval.duration.toMillis), runInterval.end)
ContainerProxy.constructWhiskActivation(job, initRunInterval, response)
}
}.recover {
case InitializationError(interval, response) =>
ContainerProxy.constructWhiskActivation(job, interval, response)
}
// Sending active ack and storing the activation are concurrent side-effects
// and do not block further execution of the future. They are completely
// asynchronous.
activation.andThen {
case Success(activation) => sendActiveAck(tid, activation)
}.flatMap { activation =>
container.logs(job.action.limits.logs.asMegaBytes, job.action.exec.sentinelledLogs).map { logs =>
activation.withLogs(ActivationLogs(logs.toVector))
}
}.andThen {
case Success(activation) => storeActivation(tid, activation)
}.flatMap { activation =>
// Fail the future iff the activation was unsuccessful to facilitate
// better cleanup logic.
if (activation.response.isSuccess) Future.successful(activation)
else Future.failed(ActivationUnsuccessfulError(activation))
}
}
}
object ContainerProxy {
def props(factory: (TransactionId, String, ImageName, Boolean, ByteSize) => Future[Container],
ack: (TransactionId, WhiskActivation) => Future[Any],
store: (TransactionId, WhiskActivation) => Future[Any]) = Props(new ContainerProxy(factory, ack, store))
// Needs to be thread-safe as it's used by multiple proxies concurrently.
private val containerCount = new Counter
/**
* Generates a unique container name.
*
* @param prefix the container name's prefix
   * @param suffix the container name's suffix
* @return a unique container name
*/
def containerName(prefix: String, suffix: String) =
s"wsk_${containerCount.next()}_${prefix}_${suffix}".replaceAll("[^a-zA-Z0-9_]", "")
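  // Illustrative (added, not in the original source; the counter value depends on prior calls):
  //   containerName("guest", "hello world!") might yield "wsk_7_guest_helloworld"
  // since every character outside [a-zA-Z0-9_] is stripped.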
/**
* Creates a WhiskActivation ready to be sent via active ack.
*
* @param job the job that was executed
* @param interval the time it took to execute the job
* @param response the response to return to the user
* @return a WhiskActivation to be sent to the user
*/
def constructWhiskActivation(job: Run, interval: Interval, response: ActivationResponse) = {
val causedBy = if (job.msg.causedBySequence) Parameters("causedBy", "sequence".toJson) else Parameters()
WhiskActivation(
activationId = job.msg.activationId,
namespace = job.msg.activationNamespace,
subject = job.msg.user.subject,
cause = job.msg.cause,
name = job.action.name,
version = job.action.version,
start = interval.start,
end = interval.end,
duration = Some(interval.duration.toMillis),
response = response,
annotations = {
Parameters("limits", job.action.limits.toJson) ++
Parameters("path", job.action.fullyQualifiedName(false).toString.toJson) ++ causedBy
})
}
}
/** Indicates an activation with a non-successful response */
case class ActivationUnsuccessfulError(activation: WhiskActivation) extends Exception(s"activation ${activation.activationId} failed")
|
domdom82/openwhisk
|
core/invoker/src/main/scala/whisk/core/containerpool/ContainerProxy.scala
|
Scala
|
apache-2.0
| 15,749 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.net.SocketTimeoutException
import kafka.admin.AdminUtils
import kafka.cluster.BrokerEndPoint
import kafka.log.LogConfig
import kafka.message.ByteBufferMessageSet
import kafka.api.KAFKA_090
import kafka.common.{KafkaStorageException, TopicAndPartition}
import ReplicaFetcherThread._
import org.apache.kafka.clients.{ManualMetadataUpdater, NetworkClient, ClientRequest, ClientResponse}
import org.apache.kafka.common.network.{LoginType, Selectable, ChannelBuilders, NetworkReceive, Selector, Mode}
import org.apache.kafka.common.requests.{ListOffsetResponse, FetchResponse, RequestSend, AbstractRequest, ListOffsetRequest}
import org.apache.kafka.common.requests.{FetchRequest => JFetchRequest}
import org.apache.kafka.common.security.ssl.SslFactory
import org.apache.kafka.common.{Node, TopicPartition}
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.protocol.{Errors, ApiKeys}
import org.apache.kafka.common.utils.Time
import scala.collection.{JavaConverters, Map, mutable}
import JavaConverters._
class ReplicaFetcherThread(name: String,
fetcherId: Int,
sourceBroker: BrokerEndPoint,
brokerConfig: KafkaConfig,
replicaMgr: ReplicaManager,
metrics: Metrics,
time: Time)
extends AbstractFetcherThread(name = name,
clientId = name,
sourceBroker = sourceBroker,
fetchBackOffMs = brokerConfig.replicaFetchBackoffMs,
isInterruptible = false) {
type REQ = FetchRequest
type PD = PartitionData
private val fetchRequestVersion: Short = if (brokerConfig.interBrokerProtocolVersion.onOrAfter(KAFKA_090)) 1 else 0
private val socketTimeout: Int = brokerConfig.replicaSocketTimeoutMs
private val replicaId = brokerConfig.brokerId
private val maxWait = brokerConfig.replicaFetchWaitMaxMs
private val minBytes = brokerConfig.replicaFetchMinBytes
private val fetchSize = brokerConfig.replicaFetchMaxBytes
private def clientId = name
private val sourceNode = new Node(sourceBroker.id, sourceBroker.host, sourceBroker.port)
  // we need to include both the broker id and the fetcher id
  // as metrics tags to avoid metric name conflicts when more than
  // one fetcher thread connects to the same broker
private val networkClient = {
val selector = new Selector(
NetworkReceive.UNLIMITED,
brokerConfig.connectionsMaxIdleMs,
metrics,
time,
"replica-fetcher",
Map("broker-id" -> sourceBroker.id.toString, "fetcher-id" -> fetcherId.toString).asJava,
false,
ChannelBuilders.create(brokerConfig.interBrokerSecurityProtocol, Mode.CLIENT, LoginType.SERVER, brokerConfig.values)
)
new NetworkClient(
selector,
new ManualMetadataUpdater(),
clientId,
1,
0,
Selectable.USE_DEFAULT_BUFFER_SIZE,
brokerConfig.replicaSocketReceiveBufferBytes,
brokerConfig.requestTimeoutMs,
time
)
}
override def shutdown(): Unit = {
super.shutdown()
networkClient.close()
}
// process fetched data
def processPartitionData(topicAndPartition: TopicAndPartition, fetchOffset: Long, partitionData: PartitionData) {
try {
val TopicAndPartition(topic, partitionId) = topicAndPartition
val replica = replicaMgr.getReplica(topic, partitionId).get
val messageSet = partitionData.toByteBufferMessageSet
warnIfMessageOversized(messageSet)
if (fetchOffset != replica.logEndOffset.messageOffset)
throw new RuntimeException("Offset mismatch: fetched offset = %d, log end offset = %d.".format(fetchOffset, replica.logEndOffset.messageOffset))
trace("Follower %d has replica log end offset %d for partition %s. Received %d messages and leader hw %d"
.format(replica.brokerId, replica.logEndOffset.messageOffset, topicAndPartition, messageSet.sizeInBytes, partitionData.highWatermark))
replica.log.get.append(messageSet, assignOffsets = false)
trace("Follower %d has replica log end offset %d after appending %d bytes of messages for partition %s"
.format(replica.brokerId, replica.logEndOffset.messageOffset, messageSet.sizeInBytes, topicAndPartition))
val followerHighWatermark = replica.logEndOffset.messageOffset.min(partitionData.highWatermark)
      // for the follower replica, we do not need to keep
      // its segment base offset or physical position;
      // these values will be computed upon becoming the leader
replica.highWatermark = new LogOffsetMetadata(followerHighWatermark)
trace("Follower %d set replica high watermark for partition [%s,%d] to %s"
.format(replica.brokerId, topic, partitionId, followerHighWatermark))
} catch {
case e: KafkaStorageException =>
fatal("Disk error while replicating data.", e)
Runtime.getRuntime.halt(1)
}
}
def warnIfMessageOversized(messageSet: ByteBufferMessageSet): Unit = {
if (messageSet.sizeInBytes > 0 && messageSet.validBytes <= 0)
error("Replication is failing due to a message that is greater than replica.fetch.max.bytes. This " +
"generally occurs when the max.message.bytes has been overridden to exceed this value and a suitably large " +
"message has also been sent. To fix this problem increase replica.fetch.max.bytes in your broker config to be " +
"equal or larger than your settings for max.message.bytes, both at a broker and topic level.")
}
/**
* Handle a partition whose offset is out of range and return a new fetch offset.
*/
def handleOffsetOutOfRange(topicAndPartition: TopicAndPartition): Long = {
val replica = replicaMgr.getReplica(topicAndPartition.topic, topicAndPartition.partition).get
/**
* Unclean leader election: A follower goes down, in the meanwhile the leader keeps appending messages. The follower comes back up
* and before it has completely caught up with the leader's logs, all replicas in the ISR go down. The follower is now uncleanly
* elected as the new leader, and it starts appending messages from the client. The old leader comes back up, becomes a follower
* and it may discover that the current leader's end offset is behind its own end offset.
*
* In such a case, truncate the current follower's log to the current leader's end offset and continue fetching.
*
* There is a potential for a mismatch between the logs of the two replicas here. We don't fix this mismatch as of now.
*/
val leaderEndOffset: Long = earliestOrLatestOffset(topicAndPartition, ListOffsetRequest.LATEST_TIMESTAMP,
brokerConfig.brokerId)
if (leaderEndOffset < replica.logEndOffset.messageOffset) {
// Prior to truncating the follower's log, ensure that doing so is not disallowed by the configuration for unclean leader election.
// This situation could only happen if the unclean election configuration for a topic changes while a replica is down. Otherwise,
// we should never encounter this situation since a non-ISR leader cannot be elected if disallowed by the broker configuration.
if (!LogConfig.fromProps(brokerConfig.originals, AdminUtils.fetchEntityConfig(replicaMgr.zkUtils,
ConfigType.Topic, topicAndPartition.topic)).uncleanLeaderElectionEnable) {
// Log a fatal error and shutdown the broker to ensure that data loss does not unexpectedly occur.
fatal("Halting because log truncation is not allowed for topic %s,".format(topicAndPartition.topic) +
" Current leader %d's latest offset %d is less than replica %d's latest offset %d"
.format(sourceBroker.id, leaderEndOffset, brokerConfig.brokerId, replica.logEndOffset.messageOffset))
Runtime.getRuntime.halt(1)
}
warn("Replica %d for partition %s reset its fetch offset from %d to current leader %d's latest offset %d"
.format(brokerConfig.brokerId, topicAndPartition, replica.logEndOffset.messageOffset, sourceBroker.id, leaderEndOffset))
replicaMgr.logManager.truncateTo(Map(topicAndPartition -> leaderEndOffset))
leaderEndOffset
} else {
/**
* The follower could have been down for a long time and when it starts up, its end offset could be smaller than the leader's
* start offset because the leader has deleted old logs (log.logEndOffset < leaderStartOffset).
*
* Roll out a new log at the follower with the start offset equal to the current leader's start offset and continue fetching.
*/
val leaderStartOffset: Long = earliestOrLatestOffset(topicAndPartition, ListOffsetRequest.EARLIEST_TIMESTAMP,
brokerConfig.brokerId)
warn("Replica %d for partition %s reset its fetch offset from %d to current leader %d's start offset %d"
.format(brokerConfig.brokerId, topicAndPartition, replica.logEndOffset.messageOffset, sourceBroker.id, leaderStartOffset))
replicaMgr.logManager.truncateFullyAndStartAt(topicAndPartition, leaderStartOffset)
leaderStartOffset
}
}
// any logic for partitions whose leader has changed
def handlePartitionsWithErrors(partitions: Iterable[TopicAndPartition]) {
delayPartitions(partitions, brokerConfig.replicaFetchBackoffMs.toLong)
}
protected def fetch(fetchRequest: FetchRequest): Map[TopicAndPartition, PartitionData] = {
val clientResponse = sendRequest(ApiKeys.FETCH, Some(fetchRequestVersion), fetchRequest.underlying)
new FetchResponse(clientResponse.responseBody).responseData.asScala.map { case (key, value) =>
TopicAndPartition(key.topic, key.partition) -> new PartitionData(value)
}
}
private def sendRequest(apiKey: ApiKeys, apiVersion: Option[Short], request: AbstractRequest): ClientResponse = {
import kafka.utils.NetworkClientBlockingOps._
val header = apiVersion.fold(networkClient.nextRequestHeader(apiKey))(networkClient.nextRequestHeader(apiKey, _))
try {
if (!networkClient.blockingReady(sourceNode, socketTimeout)(time))
throw new SocketTimeoutException(s"Failed to connect within $socketTimeout ms")
else {
val send = new RequestSend(sourceBroker.id.toString, header, request.toStruct)
val clientRequest = new ClientRequest(time.milliseconds(), true, send, null)
networkClient.blockingSendAndReceive(clientRequest, socketTimeout)(time).getOrElse {
throw new SocketTimeoutException(s"No response received within $socketTimeout ms")
}
}
}
catch {
case e: Throwable =>
networkClient.close(sourceBroker.id.toString)
throw e
}
}
private def earliestOrLatestOffset(topicAndPartition: TopicAndPartition, earliestOrLatest: Long, consumerId: Int): Long = {
val topicPartition = new TopicPartition(topicAndPartition.topic, topicAndPartition.partition)
val partitions = Map(
topicPartition -> new ListOffsetRequest.PartitionData(earliestOrLatest, 1)
)
val request = new ListOffsetRequest(consumerId, partitions.asJava)
val clientResponse = sendRequest(ApiKeys.LIST_OFFSETS, None, request)
val response = new ListOffsetResponse(clientResponse.responseBody)
val partitionData = response.responseData.get(topicPartition)
Errors.forCode(partitionData.errorCode) match {
case Errors.NONE => partitionData.offsets.asScala.head
case errorCode => throw errorCode.exception
}
}
protected def buildFetchRequest(partitionMap: Map[TopicAndPartition, PartitionFetchState]): FetchRequest = {
val requestMap = mutable.Map.empty[TopicPartition, JFetchRequest.PartitionData]
partitionMap.foreach { case ((TopicAndPartition(topic, partition), partitionFetchState)) =>
if (partitionFetchState.isActive)
requestMap(new TopicPartition(topic, partition)) = new JFetchRequest.PartitionData(partitionFetchState.offset, fetchSize)
}
new FetchRequest(new JFetchRequest(replicaId, maxWait, minBytes, requestMap.asJava))
}
}
object ReplicaFetcherThread {
private[server] class FetchRequest(val underlying: JFetchRequest) extends AbstractFetcherThread.FetchRequest {
def isEmpty: Boolean = underlying.fetchData.isEmpty
def offset(topicAndPartition: TopicAndPartition): Long =
underlying.fetchData.asScala(new TopicPartition(topicAndPartition.topic, topicAndPartition.partition)).offset
}
private[server] class PartitionData(val underlying: FetchResponse.PartitionData) extends AbstractFetcherThread.PartitionData {
def errorCode: Short = underlying.errorCode
def toByteBufferMessageSet: ByteBufferMessageSet = new ByteBufferMessageSet(underlying.recordSet)
def highWatermark: Long = underlying.highWatermark
def exception: Option[Throwable] = Errors.forCode(errorCode) match {
case Errors.NONE => None
case e => Some(e.exception)
}
}
}
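// A minimal sketch (not part of the original file) of the decision implemented in
// handleOffsetOutOfRange above, with the Kafka internals stripped away: truncate to
// the leader's end offset when the leader is behind the follower, otherwise restart
// from the leader's start offset.
private object OffsetResetSketch {
  def resetOffset(followerLogEndOffset: Long, leaderStartOffset: Long, leaderEndOffset: Long): Long =
    if (leaderEndOffset < followerLogEndOffset) leaderEndOffset else leaderStartOffset
}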
|
eljefe6a/kafka
|
core/src/main/scala/kafka/server/ReplicaFetcherThread.scala
|
Scala
|
apache-2.0
| 13,832 |
import sbt._
import Keys._
import org.scalatra.sbt._
import org.scalatra.sbt.PluginKeys._
import com.mojolly.scalate.ScalatePlugin._
import ScalateKeys._
import sbtassembly.Plugin._
import sbtassembly.Plugin.AssemblyKeys._
object UschedulerBuild extends Build {
val Organization = "com.none"
val Name = "uScheduler"
val ScalaVersion = "2.10.4"
val ScalatraVersion = "2.2.2"
lazy val project = Project (
"uscheduler",
file("."),
settings = Defaults.defaultSettings ++ ScalatraPlugin.scalatraWithJRebel ++ scalateSettings ++ Seq(
organization := Organization,
name := Name,
scalaVersion := ScalaVersion,
resolvers += Classpaths.typesafeReleases,
resolvers += "Typesafe repository" at "http://repo.typesafe.com/typesafe/releases/",
libraryDependencies ++= Seq(
"org.scalatra" %% "scalatra-atmosphere" % "2.2.2",
"org.scalatra" %% "scalatra-json" % "2.2.2",
"org.json4s" %% "json4s-jackson" % "3.2.6",
"org.eclipse.jetty" % "jetty-websocket" % "8.1.10.v20130312" % "container",
"org.scalatra" %% "scalatra" % ScalatraVersion,
"org.scalatra" %% "scalatra-scalate" % ScalatraVersion,
"org.scalatra" %% "scalatra-specs2" % ScalatraVersion % "test",
"ch.qos.logback" % "logback-classic" % "1.0.6" % "runtime",
"org.eclipse.jetty" % "jetty-webapp" % "9.1.4.v20140401" % "container;compile",
"org.eclipse.jetty.orbit" % "javax.servlet" % "3.0.0.v201112011016" % "container;provided;test" artifacts (Artifact("javax.servlet", "jar", "jar"))
),
scalateTemplateConfig in Compile <<= (sourceDirectory in Compile){ base =>
Seq(
TemplateConfig(
base / "webapp" / "WEB-INF" / "templates",
Seq.empty, /* default imports should be added here */
Seq(
Binding("context", "_root_.org.scalatra.scalate.ScalatraRenderContext", importMembers = true, isImplicit = true)
), /* add extra bindings here */
Some("templates")
)
)
},
resourceGenerators in Compile <+= (resourceManaged, baseDirectory) map {
(managedBase, base) =>
val webappBase = base / "src" / "main" / "webapp"
for {
(from, to) <- webappBase ** "*" x rebase(webappBase, managedBase / "main" / "webapp")
} yield {
Sync.copy(from, to)
to
}
}
)
)
}
|
LarsHadidi/uScheduler
|
project/build.scala
|
Scala
|
apache-2.0
| 2,449 |
package pl.msitko.xml.optics
import pl.msitko.xml.BasicJvmSpec
class PlatedJvmSpec extends PlatedSpec with BasicJvmSpec
|
note/xml-lens
|
optics/jvm/src/test/scala/pl/msitko/xml/optics/PlatedJvmSpec.scala
|
Scala
|
mit
| 122 |
package scala.collection
import org.junit.runner.RunWith
import org.junit.runners.JUnit4
import org.junit.Assert._
import org.junit.Test
@RunWith(classOf[JUnit4])
class MapTest {
@Test def test: Unit = {
val map = collection.Map(
1 -> 1,
2 -> 2,
4 -> 4,
5 -> 5
)
val actual = map -- List(1, 2, 3)
val expected = collection.Map(
4 -> 4,
5 -> 5
)
assertEquals(expected, actual)
}
@Test def mkString(): Unit = {
assert(Map().mkString == "")
assert(Map(1 -> 1).mkString(",") == "1 -> 1")
assert(Map(1 -> 1, 2 -> 2).mkString(",") == "1 -> 1,2 -> 2")
}
@Test def addString(): Unit = {
assert(Map().addString(new StringBuilder).toString == "")
assert(Map(1 -> 1).addString(new StringBuilder).toString == "1 -> 1")
assert(Map(1 -> 1, 2 -> 2).mkString("foo [", ", ", "] bar").toString ==
"foo [1 -> 1, 2 -> 2] bar")
}
@Test def t11188(): Unit = {
import scala.collection.immutable.ListMap
val m = ListMap(1 -> "one")
val mm = Map(2 -> "two") ++: m
assert(mm.isInstanceOf[ListMap[Int,String]])
assertEquals(mm.mkString("[", ", ", "]"), "[1 -> one, 2 -> two]")
}
@Test def deprecatedPPE(): Unit = {
val m = (1 to 10).map(x => (x, x)).toMap
val m1 = m ++: m
assertEquals(m.toList.sorted, (m1: Map[Int, Int]).toList.sorted)
val s1 = List(1) ++: m
assertEquals(1 :: m.toList.sorted, (s1: Iterable[Any]).toList.sortBy({case (x: Int, _) => x; case x: Int => x}))
}
@Test
def flatMapOption(): Unit = {
def f(p: (Int, Int)) = if (p._1 < p._2) Some(p._1, p._2) else None
val m = (1 to 10).zip(11 to 20).toMap
val m2 = m.flatMap(f)
(m2: Map[Int, Int]).head
val m3 = m.flatMap(p => Some(p))
(m3: Map[Int, Int]).head
val m4 = m.flatMap(_ => Some(3))
(m4: Iterable[Int]).head
}
@Test
def t11589(): Unit = {
// tests the strictness of Map#values
def check(m: collection.Map[Int, Int]): Unit = {
def checkImmutable[K, V](m: immutable.Map[Int, Int]): Unit = {
var i = 0
m.withDefault(_ => -1).values.map{v => i = 1; v}
assertEquals(1, i)
i = 0
m.withDefaultValue(-1).values.map{v => i = 1; v}
assertEquals(1, i)
}
var i = 0
m.values.map{v => i = 1; v}
assertEquals(1, i)
m match {
case im: immutable.Map[Int, Int] =>
checkImmutable(im)
case _ =>
()
}
}
check(collection.Map(1 -> 1))
check(immutable.Map(1 -> 1))
check(mutable.Map(1 -> 1))
check(collection.SortedMap(1 -> 1))
check(immutable.SortedMap(1 -> 1))
check(mutable.SortedMap(1 -> 1))
check(immutable.HashMap(1 -> 1))
check(mutable.HashMap(1 -> 1))
check(immutable.TreeMap(1 -> 1))
check(mutable.TreeMap(1 -> 1))
check(immutable.SeqMap(1 -> 1))
check(mutable.SeqMap(1 -> 1))
check(immutable.ListMap(1 -> 1))
check(mutable.ListMap(1 -> 1))
check(immutable.VectorMap(1 -> 1))
check(immutable.TreeSeqMap(1 -> 1))
check(mutable.LinkedHashMap(1 -> 1))
check(mutable.OpenHashMap(1 -> 1))
check(mutable.CollisionProofHashMap(1 -> 1))
}
}
|
martijnhoekstra/scala
|
test/junit/scala/collection/MapTest.scala
|
Scala
|
apache-2.0
| 3,196 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon
package genc
import utils.{ NoPosition, Position }
/*
 * The MiniReporter trait is a simple, convenient trait that provides
* overloading and shorthands for the usual reporter from LeonContext.
*
* The MiniReporter object simply provides a way to create an instance
 * of the same trait. It is useful if one wishes to "import" the shorthands
* into the current scope without using inheritance. E.g.:
*
* val reporter = MiniReporter(ctx)
* import reporter._
*/
private[genc] object MiniReporter {
def apply(ctx0: LeonContext) = new MiniReporter { val ctx = ctx0 }
}
private[genc] trait MiniReporter {
protected val ctx: LeonContext
def internalError(msg: String, cause: Throwable = null) = {
import java.lang.Thread
val stack = Thread.currentThread.getStackTrace
debug(s"internal error `$msg` from:")
for (s <- stack)
debug(s.toString)
if (cause != null) {
debug("because of")
debug(cause)
}
ctx.reporter.internalError(msg)
}
def fatalError(msg: String, pos: Position = NoPosition) = ctx.reporter.fatalError(pos, msg)
def debugTree(title: String, tree: ir.IR#Tree) = {
if (ctx.reporter.isDebugEnabled(utils.DebugSectionTrees)) {
ctx.reporter.info("\\n")
ctx.reporter.info(utils.ASCIIHelpers.title(title))
ctx.reporter.info("\\n")
ctx.reporter.info(tree.toString)
ctx.reporter.info("\\n")
}
}
def debug(msg: String) = ctx.reporter.debug(msg)(utils.DebugSectionGenC)
def debug(e: Throwable) {
import java.io.{ StringWriter, PrintWriter }
val msg = e.getMessage
if (msg != null)
debug(e.getMessage)
val sw = new StringWriter();
e.printStackTrace(new PrintWriter(sw));
val stack = sw.toString();
debug("Exception's stack trace:\\n " + stack)
val cause = e.getCause
if (cause != null) {
debug("because of")
debug(cause)
}
}
def debug(msg: String, tree: CAST.Tree): Unit = debug(msg + ": " + tree2String(tree) + " of " + tree.getClass)
def debug(tree: CAST.Tree): Unit = debug(tree2String(tree) + " of " + tree.getClass)
private def tree2String(tree: CAST.Tree): String = {
val p = new CPrinter()
p.print(tree)
p.toString
}
def warning(msg: String, pos: Position = NoPosition) = ctx.reporter.warning(pos, msg)
}
|
regb/leon
|
src/main/scala/leon/genc/MiniReporter.scala
|
Scala
|
gpl-3.0
| 2,370 |
package structure.monad
// This example is based off the one in Runar Bjarnason's "Dead Simple Dependency Injection" talk.
// http://www.youtube.com/watch?v=ZasXwtTRkio
object FreeMonadEx1 {
sealed trait Interact[A]
case class Ask(prompt: String) extends Interact[String]
case class Tell(msg: String) extends Interact[Unit]
trait Monad[M[_]] {
def pure[A](a: A): M[A]
def flatMap[A, B](a: M[A])(f: A => M[B]): M[B]
}
object Monad {
def apply[F[_]: Monad]: Monad[F] = implicitly[Monad[F]]
}
sealed trait ~>[F[_], G[_]] { self =>
def apply[A](f: F[A]): G[A]
def or[H[_]](f: H ~> G): ({ type f[x] = Coproduct[F, H, x] })#f ~> G =
new (({ type f[x] = Coproduct[F, H, x] })#f ~> G) {
def apply[A](c: Coproduct[F, H, A]): G[A] = c.run match {
case Left(fa) => self(fa)
case Right(ha) => f(ha)
}
}
}
sealed trait Free[F[_], A] {
def flatMap[B](f: A => Free[F, B]): Free[F, B] =
this match {
case Return(a) => f(a)
case Bind(fx, g) =>
Bind(fx, g andThen (_ flatMap f))
}
def map[B](f: A => B): Free[F, B] = flatMap(a => Return(f(a)))
def foldMap[G[_]: Monad](f: F ~> G): G[A] =
this match {
case Return(a) => Monad[G].pure(a)
case Bind(fx, g) =>
Monad[G].flatMap(f(fx)) { a =>
g(a).foldMap(f)
}
}
}
case class Return[F[_], A](a: A) extends Free[F, A]
case class Bind[F[_], I, A](a: F[I], f: I => Free[F, A]) extends Free[F, A]
//implicit def lift[F[_],A](f: F[A]): Free[F,A] =
// Bind(f, (a: A) => Return(a))
//val prg = for {
// first <- Ask("What’s your first name?")
// last <- Ask("What's your last name?")
// _ <- Tell(s"Hello, $first, $last!")
//} yield ()
type Id[A] = A
implicit val identityMonad: Monad[Id] = new Monad[Id] {
def pure[A](a: A) = a
def flatMap[A, B](a: A)(f: A => B) = f(a)
}
object Console extends (Interact ~> Id) {
def apply[A](i: Interact[A]) = i match {
case Ask(prompt) =>
println(prompt)
readLine
case Tell(msg) =>
println(msg)
}
}
type Tester[A] = Map[String, String] => (List[String], A)
object TestConsole extends (Interact ~> Tester) {
def apply[A](i: Interact[A]) = i match {
case Ask(prompt) => m => (List(), m(prompt))
case Tell(msg) => _ => (List(msg), ())
}
}
implicit val testerMonad = new Monad[Tester] {
def pure[A](a: A) = _ => (List(), a)
def flatMap[A, B](t: Tester[A])(f: A => Tester[B]) =
m => {
val (o1, a) = t(m)
val (o2, b) = f(a)(m)
(o1 ++ o2, b)
}
}
type UserID = String
type Password = String
type Permission = String
case class User(id: String)
sealed trait Auth[A]
case class Login(u: UserID, p: Password) extends Auth[Option[User]]
case class HasPermission(
u: User, p: Permission
) extends Auth[Boolean]
case class Coproduct[F[_], G[_], A](run: Either[F[A], G[A]])
sealed trait Inject[F[_], G[_]] {
def inj[A](sub: F[A]): G[A]
def prj[A](sup: G[A]): Option[F[A]]
}
object Inject {
implicit def injRefl[F[_]] = new Inject[F, F] {
def inj[A](sub: F[A]) = sub
def prj[A](sup: F[A]) = Some(sup)
}
implicit def injLeft[F[_], G[_]] = new Inject[F, ({ type λ[α] = Coproduct[F, G, α] })#λ] {
def inj[A](sub: F[A]) = Coproduct(Left(sub))
def prj[A](sup: Coproduct[F, G, A]) = sup.run match {
case Left(fa) => Some(fa)
case Right(_) => None
}
}
implicit def injRight[F[_], G[_], H[_]](implicit I: Inject[F, G]) =
new Inject[F, ({ type f[x] = Coproduct[H, G, x] })#f] {
def inj[A](sub: F[A]) = Coproduct(Right(I.inj(sub)))
def prj[A](sup: Coproduct[H, G, A]) = sup.run match {
case Left(_) => None
case Right(x) => I.prj(x)
}
}
}
def lift[F[_], G[_], A](f: F[A])(implicit I: Inject[F, G]): Free[G, A] =
Bind(I.inj(f), Return(_: A))
class Interacts[F[_]](implicit I: Inject[Interact, F]) {
def tell(msg: String): Free[F, Unit] = lift(Tell(msg))
def ask(prompt: String): Free[F, String] = lift(Ask(prompt))
}
class Auths[F[_]](implicit I: Inject[Auth, F]) {
def login(id: UserID, pwd: Password): Free[F, Option[User]] =
lift(Login(id, pwd))
def hasPermission(u: User, p: Permission): Free[F, Boolean] =
lift(HasPermission(u, p))
}
object Auths {
implicit def instance[F[_]](implicit I: Inject[Auth, F]): Auths[F] = new Auths[F]
}
object Interacts {
implicit def instance[F[_]](implicit I: Inject[Interact, F]): Interacts[F] = new Interacts[F]
}
val KnowSecret = "KnowSecret"
def prg[F[_]](implicit I: Interacts[F], A: Auths[F]) = {
import I._; import A._
for {
uid <- ask("What's your user ID?")
pwd <- ask("Password, please.")
u <- login(uid, pwd)
b <- u.map(hasPermission(_, KnowSecret)).getOrElse(Return(false))
_ <- if (b) tell("UUDDLRLRBA") else tell("Go away!")
} yield ()
}
type App[A] = Coproduct[Auth, Interact, A]
val app: Free[App, Unit] = prg[App]
val TestAuth: Auth ~> Id = new (Auth ~> Id) {
def apply[A](a: Auth[A]) = a match {
case Login(uid, pwd) =>
if (uid == "john.snow" && pwd == "Ghost")
Some(User("john.snow"))
else None
case HasPermission(u, _) =>
u.id == "john.snow"
}
}
def runApp = app.foldMap(TestAuth or Console)
}
object FreeMonadExample1 extends App {
}
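// A minimal sketch (hypothetical demo object, not part of the original file): builds a
// two-step Interact program by hand and folds it with the pure TestConsole interpreter,
// supplying the answer through the Map and collecting the Tell output.
object FreeMonadEx1Demo extends App {
  import FreeMonadEx1._
  val greet: Free[Interact, Unit] =
    Bind[Interact, String, Unit](Ask("What's your name?"), name =>
      Bind[Interact, Unit, Unit](Tell(s"Hello, $name!"), _ => Return[Interact, Unit](())))
  // Tester[Unit] is Map[String, String] => (List[String], Unit)
  val run: Tester[Unit] = greet.foldMap(TestConsole)
  val (output, _) = run(Map("What's your name?" -> "Ada"))
  println(output) // List(Hello, Ada!)
}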
|
adilakhter/scalaznoob
|
src/main/scala/structure/monad/Free.scala
|
Scala
|
apache-2.0
| 5,561 |
package com.twitter.finagle.zipkin.thrift
import com.twitter.finagle.tracing._
import com.twitter.util.Time
import com.twitter.util.events.Sink
import org.junit.runner.RunWith
import org.mockito.Mockito._
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
@RunWith(classOf[JUnitRunner])
class SamplingTracerTest extends FunSuite
with MockitoSugar
{
private val traceId = TraceId(
None,
None,
new SpanId(1L),
None,
Flags()
)
private val record = Record(
traceId,
Time.now,
Annotation.Message("sup"),
None
)
test("sends sampled events to Sink") {
val sink = mock[Sink]
val tracer = mock[Tracer]
val samplingTracer = new SamplingTracer(tracer, 1f, sink)
samplingTracer.record(record)
verify(sink, times(1)).event(ZipkinTracer.Trace, objectVal = record.annotation)
}
test("does not send events to sink when not sampled") {
val sink = mock[Sink]
val tracer = mock[Tracer]
val samplingTracer = new SamplingTracer(tracer, 0f, sink)
samplingTracer.record(record)
verifyNoMoreInteractions(sink)
}
}
|
kingtang/finagle
|
finagle-zipkin/src/test/scala/com/twitter/finagle/zipkin/thrift/SamplingTracerTest.scala
|
Scala
|
apache-2.0
| 1,150 |
package mesosphere.marathon.event.http
import akka.actor.{ Actor, ActorLogging }
import akka.pattern.pipe
import mesosphere.marathon.event.{
MarathonSubscriptionEvent,
Unsubscribe,
Subscribe
}
import mesosphere.marathon.event.http.SubscribersKeeperActor._
import mesosphere.marathon.state.MarathonStore
import scala.concurrent.Future
class SubscribersKeeperActor(val store: MarathonStore[EventSubscribers]) extends Actor with ActorLogging {
implicit val ec = HttpEventModule.executionContext
override def receive: Receive = {
case event @ Subscribe(_, callbackUrl, _, _) =>
val addResult: Future[Option[EventSubscribers]] = add(callbackUrl)
val subscription: Future[MarathonSubscriptionEvent] =
addResult.collect {
case Some(subscribers) =>
if (subscribers.urls.contains(callbackUrl))
log.info("Callback [%s] subscribed." format callbackUrl)
event
}
subscription pipeTo sender()
case event @ Unsubscribe(_, callbackUrl, _, _) =>
val removeResult: Future[Option[EventSubscribers]] = remove(callbackUrl)
val subscription: Future[MarathonSubscriptionEvent] =
removeResult.collect {
case Some(subscribers) =>
if (!subscribers.urls.contains(callbackUrl))
log.info("Callback [%s] unsubscribed." format callbackUrl)
event
}
subscription pipeTo sender()
case GetSubscribers =>
val subscription = store.fetch(Subscribers).map(_.getOrElse(EventSubscribers()))
subscription pipeTo sender()
}
protected[this] def add(callbackUrl: String): Future[Option[EventSubscribers]] =
store.modify(Subscribers) { deserialize =>
val existingSubscribers = deserialize()
if (existingSubscribers.urls.contains(callbackUrl)) {
log.info("Existing callback [%s] resubscribed." format callbackUrl)
existingSubscribers
}
else EventSubscribers(existingSubscribers.urls + callbackUrl)
}
protected[this] def remove(callbackUrl: String): Future[Option[EventSubscribers]] =
store.modify(Subscribers) { deserialize =>
val existingSubscribers = deserialize()
if (existingSubscribers.urls.contains(callbackUrl))
EventSubscribers(existingSubscribers.urls - callbackUrl)
else {
log.warning("Attempted to unsubscribe nonexistent callback [%s]." format callbackUrl)
existingSubscribers
}
}
}
object SubscribersKeeperActor {
case object GetSubscribers
final val Subscribers = "http_event_subscribers"
}
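// A minimal usage sketch (the actor ref, timeout and imports are assumed, not part of
// the original source): the keeper answers GetSubscribers with the stored EventSubscribers.
//   import akka.pattern.ask
//   import akka.util.Timeout
//   import scala.concurrent.duration._
//   implicit val timeout: Timeout = Timeout(5.seconds)
//   val current: Future[EventSubscribers] =
//     (subscribersKeeper ? SubscribersKeeperActor.GetSubscribers).mapTo[EventSubscribers]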
|
14Zen/marathon
|
src/main/scala/mesosphere/marathon/event/http/SubscribersKeeperActor.scala
|
Scala
|
apache-2.0
| 2,589 |
package org.mozartoz.bootcompiler
package symtab
/** Companion object for Builtin */
object Builtin {
/** Parameter kind */
object ParamKind extends Enumeration {
val In, Out = Value
}
/** Parameter kind */
type ParamKind = ParamKind.Value
def create(moduleName: String, name: String, arity: Int) = new Builtin(moduleName, name, arity)
}
/** Builtin procedure of the host VM */
class Builtin(val moduleName: String, val name: String, val arity: Int) {
override def toString() =
moduleName + "." + (if (name.charAt(0).isLetter) name else "'" + name + "'")
}
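// A minimal sketch (hypothetical demo, not part of the original file): names that do
// not start with a letter are quoted by toString, all others are printed verbatim.
object BuiltinToStringDemo extends App {
  println(Builtin.create("Number", "+", 3))   // Number.'+'
  println(Builtin.create("Value", "wait", 1)) // Value.wait
}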
|
eregon/mozart-graal
|
bootcompiler/src/main/scala/org/mozartoz/bootcompiler/symtab/Builtin.scala
|
Scala
|
bsd-2-clause
| 583 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.mqtt.protocol
import io.gatling.core.session.Expression
import io.netty.handler.codec.mqtt.MqttQoS
object LastWillBuilder {
def apply(topic: Expression[String], message: Expression[Array[Byte]]): LastWillBuilder =
new LastWillBuilder(topic, message, qosOverride = None, retainOverride = None)
}
final case class LastWillBuilder(
topic: Expression[String],
message: Expression[Array[Byte]],
qosOverride: Option[MqttQoS],
retainOverride: Option[Boolean]
) {
def qosAtMostOnce: LastWillBuilder = qos(MqttQoS.AT_MOST_ONCE)
def qosAtLeastOnce: LastWillBuilder = qos(MqttQoS.AT_LEAST_ONCE)
def qosExactlyOnce: LastWillBuilder = qos(MqttQoS.EXACTLY_ONCE)
private def qos(newQos: MqttQoS): LastWillBuilder = copy(qosOverride = Some(newQos))
def retain(newRetain: Boolean): LastWillBuilder = copy(retainOverride = Some(newRetain))
def build: LastWill = LastWill(topic, message, qosOverride, retainOverride)
}
final case class LastWill(topic: Expression[String], message: Expression[Array[Byte]], qosOverride: Option[MqttQoS], retainOverride: Option[Boolean])
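// A minimal usage sketch (the topic and payload expressions are assumed to come from
// the surrounding Gatling DSL, not shown here):
//   LastWillBuilder(topicExpr, payloadExpr).qosAtLeastOnce.retain(true).build
// yields LastWill(topicExpr, payloadExpr, Some(MqttQoS.AT_LEAST_ONCE), Some(true)).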
|
gatling/gatling
|
gatling-mqtt/src/main/scala/io/gatling/mqtt/protocol/LastWill.scala
|
Scala
|
apache-2.0
| 1,728 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.feats
import io.truthencode.ddo.model.classes.HeroicCharacterClass
import io.truthencode.ddo.model.classes.HeroicCharacterClass.Warlock
import io.truthencode.ddo.support.requisite.{FeatRequisiteImpl, GrantsToClass, RequiresAllOfClass}
/**
* +1 to Fortitude Saves
*/
protected[feats] trait FiendishResilience
extends FeatRequisiteImpl with Passive with RequiresAllOfClass with GrantsToClass {
self: ClassFeat =>
override def grantToClass: Seq[(HeroicCharacterClass, Int)] = List((Warlock, 4))
}
|
adarro/ddo-calc
|
subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/feats/FiendishResilience.scala
|
Scala
|
apache-2.0
| 1,214 |
//package io.youi.example.ui.hypertext
//
//import io.youi._
//import io.youi.hypertext.style.Image
//import io.youi.hypertext.{ImageView, TextInput}
//import io.youi.util.ImageUtility
//import org.scalajs.dom.Event
//import reactify._
//
//import scala.concurrent.Future
//
//object PreviewImageExample extends HTMLScreen {
// override def name: String = "Preview Image"
// override def path: String = "/examples/html/preview-image.html"
//
// override protected def load(): Future[Unit] = super.load().map { _ =>
// val image = new ImageView {
// position.center := ui.center
// position.middle := ui.middle
// }
// val fileUpload = new TextInput {
// position.center := ui.center
// position.top := image.position.bottom + 10.0
// element.`type` = "file"
// element.addEventListener("change", (_: Event) => {
// if (element.files.length > 0) {
// val file = element.files(0)
// ImageUtility.generatePreview(file, 1024.0, 768.0).foreach {
// case Some(dataURL) => {
// image.image := Image(dataURL)
// }
// case None => scribe.warn(s"Unable to generate preview for ${file.name}.")
// }
// }
// })
// }
// container.children += image
// container.children += fileUpload
// }
//}
|
outr/youi
|
example/js/src/main/scala/io/youi/example/ui/hypertext/PreviewImageExample.scala
|
Scala
|
mit
| 1,323 |
package dotty.tools.dotc
package transform
package sjs
import dotty.tools.dotc.ast.tpd
import dotty.tools.dotc.core._
import Contexts._
import Decorators._
import Denotations._
import Flags._
import NameKinds.DefaultGetterName
import StdNames._
import Symbols._
import SymUtils._
import Types._
import util.Spans.Span
import util.SrcPos
import dotty.tools.backend.sjs.JSDefinitions.jsdefn
import JSExportUtils._
import JSSymUtils._
import org.scalajs.ir.Names.DefaultModuleID
import org.scalajs.ir.Trees.TopLevelExportDef.isValidTopLevelExportName
object PrepJSExports {
import tpd._
import PrepJSInterop.{checkSetterSignature, isJSAny, isPrivateMaybeWithin}
private sealed abstract class ExportDestination
private object ExportDestination {
/** Export in the "normal" way: as an instance member, or at the top-level
* for naturally top-level things (classes and modules).
*/
case object Normal extends ExportDestination
/** Export at the top-level. */
final case class TopLevel(moduleID: String) extends ExportDestination
/** Export as a static member of the companion class. */
case object Static extends ExportDestination
}
private final case class ExportInfo(jsName: String, destination: ExportDestination)(val pos: SrcPos)
/** Checks a class or module class for export.
*
* Note that non-module Scala classes are never actually exported; their constructors are.
* However, the checks are performed on the class when the class is annotated.
*/
def checkClassOrModuleExports(sym: Symbol)(using Context): Unit = {
val exports = exportsOf(sym)
if (exports.nonEmpty)
checkClassOrModuleExports(sym, exports.head.pos)
}
/** Generate the exporter for the given DefDef or ValDef.
*
* If this DefDef is a constructor, it is registered to be exported by
* GenJSCode instead and no trees are returned.
*/
def genExportMember(baseSym: Symbol)(using Context): List[Tree] = {
val clsSym = baseSym.owner
val exports = exportsOf(baseSym)
// Helper function for errors
def err(msg: String): List[Tree] = {
report.error(msg, exports.head.pos)
Nil
}
def memType = if (baseSym.isConstructor) "constructor" else "method"
if (exports.isEmpty) {
Nil
} else if (!hasLegalExportVisibility(baseSym)) {
err(s"You may only export public and protected ${memType}s")
} else if (baseSym.is(Inline)) {
err("You may not export an inline method")
} else if (isJSAny(clsSym)) {
err(s"You may not export a $memType of a subclass of js.Any")
} else if (baseSym.isLocalToBlock) {
err("You may not export a local definition")
} else if (hasIllegalRepeatedParam(baseSym)) {
err(s"In an exported $memType, a *-parameter must come last (through all parameter lists)")
} else if (hasIllegalDefaultParam(baseSym)) {
err(s"In an exported $memType, all parameters with defaults must be at the end")
} else if (baseSym.isConstructor) {
// Constructors do not need an exporter method. We only perform the checks at this phase.
checkClassOrModuleExports(clsSym, exports.head.pos)
Nil
} else {
assert(!baseSym.is(Bridge), s"genExportMember called for bridge symbol $baseSym")
val normalExports = exports.filter(_.destination == ExportDestination.Normal)
normalExports.flatMap(exp => genExportDefs(baseSym, exp.jsName, exp.pos.span))
}
}
/** Check a class or module for export.
*
* There are 2 ways that this method can be reached:
* - via `registerClassExports`
* - via `genExportMember` (constructor of Scala class)
*/
private def checkClassOrModuleExports(sym: Symbol, errPos: SrcPos)(using Context): Unit = {
val isMod = sym.is(ModuleClass)
def err(msg: String): Unit =
report.error(msg, errPos)
def hasAnyNonPrivateCtor: Boolean =
sym.info.decl(nme.CONSTRUCTOR).hasAltWith(denot => !isPrivateMaybeWithin(denot.symbol))
if (sym.is(Trait)) {
err("You may not export a trait")
} else if (sym.hasAnnotation(jsdefn.JSNativeAnnot)) {
err("You may not export a native JS " + (if (isMod) "object" else "class"))
} else if (!hasLegalExportVisibility(sym)) {
err("You may only export public and protected " + (if (isMod) "objects" else "classes"))
} else if (isJSAny(sym.owner)) {
err("You may not export a " + (if (isMod) "object" else "class") + " in a subclass of js.Any")
} else if (sym.isLocalToBlock) {
err("You may not export a local " + (if (isMod) "object" else "class"))
} else if (!sym.isStatic) {
if (isMod)
err("You may not export a nested object")
else
err("You may not export a nested class. Create an exported factory method in the outer class to work around this limitation.")
} else if (sym.is(Abstract, butNot = Trait) && !isJSAny(sym)) {
err("You may not export an abstract class")
} else if (!isMod && !hasAnyNonPrivateCtor) {
/* This test is only relevant for JS classes but doesn't hurt for Scala
* classes as we could not reach it if there were only private
* constructors.
*/
err("You may not export a class that has only private constructors")
} else {
// OK
}
}
/** Computes the ExportInfos for sym from its annotations. */
private def exportsOf(sym: Symbol)(using Context): List[ExportInfo] = {
val trgSym = {
def isOwnerScalaClass = !sym.owner.is(ModuleClass) && !isJSAny(sym.owner)
// For primary Scala class constructors, look on the class itself
if (sym.isPrimaryConstructor && isOwnerScalaClass) sym.owner
else sym
}
val JSExportAnnot = jsdefn.JSExportAnnot
val JSExportTopLevelAnnot = jsdefn.JSExportTopLevelAnnot
val JSExportStaticAnnot = jsdefn.JSExportStaticAnnot
val JSExportAllAnnot = jsdefn.JSExportAllAnnot
// Annotations that are directly on the member
val directMemberAnnots = Set[Symbol](JSExportAnnot, JSExportTopLevelAnnot, JSExportStaticAnnot)
val directAnnots = trgSym.annotations.filter(annot => directMemberAnnots.contains(annot.symbol))
// Is this a member export (i.e. not a class or module export)?
val isMember = !sym.isClass && !sym.isConstructor
// Annotations for this member on the whole unit
val unitAnnots = {
if (isMember && sym.isPublic && !sym.is(Synthetic))
sym.owner.annotations.filter(_.symbol == JSExportAllAnnot)
else
Nil
}
val allExportInfos = for {
annot <- directAnnots ++ unitAnnots
} yield {
val isExportAll = annot.symbol == JSExportAllAnnot
val isTopLevelExport = annot.symbol == JSExportTopLevelAnnot
val isStaticExport = annot.symbol == JSExportStaticAnnot
val hasExplicitName = annot.arguments.nonEmpty
val exportPos: SrcPos = if (isExportAll) sym else annot.tree
assert(!isTopLevelExport || hasExplicitName,
em"Found a top-level export without an explicit name at ${exportPos.sourcePos}")
val name = {
if (hasExplicitName) {
annot.argumentConstantString(0).getOrElse {
report.error(
s"The argument to ${annot.symbol.name} must be a literal string",
annot.arguments(0))
"dummy"
}
} else {
sym.defaultJSName
}
}
val destination = {
if (isTopLevelExport) {
val moduleID = if (annot.arguments.size == 1) {
DefaultModuleID
} else {
annot.argumentConstantString(1).getOrElse {
report.error("moduleID must be a literal string", annot.arguments(1))
DefaultModuleID
}
}
ExportDestination.TopLevel(moduleID)
} else if (isStaticExport) {
ExportDestination.Static
} else {
ExportDestination.Normal
}
}
// Enforce proper setter signature
if (sym.isJSSetter)
checkSetterSignature(sym, exportPos, exported = true)
// Enforce no __ in name
if (!isTopLevelExport && name.contains("__"))
report.error("An exported name may not contain a double underscore (`__`)", exportPos)
/* Illegal function application exports, i.e., method named 'apply'
* without an explicit export name.
*/
if (isMember && !hasExplicitName && sym.name == nme.apply) {
destination match {
case ExportDestination.Normal =>
def shouldBeTolerated = {
isExportAll && directAnnots.exists { annot =>
annot.symbol == JSExportAnnot &&
annot.arguments.nonEmpty &&
annot.argumentConstantString(0).contains("apply")
}
}
// Don't allow apply without explicit name
if (!shouldBeTolerated) {
report.error(
"A member cannot be exported to function application. " +
"Add @JSExport(\\"apply\\") to export under the name apply.",
exportPos)
}
case _: ExportDestination.TopLevel =>
throw new AssertionError(
em"Found a top-level export without an explicit name at ${exportPos.sourcePos}")
case ExportDestination.Static =>
report.error(
"A member cannot be exported to function application as static. " +
"Use @JSExportStatic(\\"apply\\") to export it under the name 'apply'.",
exportPos)
}
}
val symOwner =
if (sym.isConstructor) sym.owner.owner
else sym.owner
// Destination-specific restrictions
destination match {
case ExportDestination.Normal =>
// Make sure we do not override the default export of toString
def isIllegalToString = {
isMember && name == "toString" && sym.name != nme.toString_ &&
sym.info.paramInfoss.forall(_.isEmpty) && !sym.isJSGetter
}
if (isIllegalToString) {
report.error(
"You may not export a zero-argument method named other than 'toString' under the name 'toString'",
exportPos)
}
// Disallow @JSExport at the top-level, as well as on objects and classes
if (symOwner.is(Package) || symOwner.isPackageObject) {
report.error("@JSExport is forbidden on top-level definitions. Use @JSExportTopLevel instead.", exportPos)
} else if (!isMember && !sym.is(Trait)) {
report.error(
"@JSExport is forbidden on objects and classes. Use @JSExport'ed factory methods instead.",
exportPos)
}
case _: ExportDestination.TopLevel =>
if (sym.is(Lazy))
report.error("You may not export a lazy val to the top level", exportPos)
else if (sym.is(Method, butNot = Accessor) && sym.isJSProperty)
report.error("You may not export a getter or a setter to the top level", exportPos)
/* Disallow non-static methods.
* Note: Non-static classes have more specific error messages in checkClassOrModuleExports.
*/
if (sym.isTerm && (!symOwner.isStatic || !symOwner.is(ModuleClass)))
report.error("Only static objects may export their members to the top level", exportPos)
// The top-level name must be a valid JS identifier
if (!isValidTopLevelExportName(name))
report.error("The top-level export name must be a valid JavaScript identifier name", exportPos)
case ExportDestination.Static =>
def companionIsNonNativeJSClass: Boolean = {
val companion = symOwner.companionClass
companion != NoSymbol
&& !companion.is(Trait)
&& isJSAny(companion)
&& !companion.hasAnnotation(jsdefn.JSNativeAnnot)
}
if (!symOwner.isStatic || !symOwner.is(ModuleClass) || !companionIsNonNativeJSClass) {
report.error(
"Only a static object whose companion class is a non-native JS class may export its members as static.",
exportPos)
}
if (isMember) {
if (sym.is(Lazy))
report.error("You may not export a lazy val as static", exportPos)
} else {
if (sym.is(Trait))
report.error("You may not export a trait as static.", exportPos)
else
report.error("Implementation restriction: cannot export a class or object as static", exportPos)
}
}
ExportInfo(name, destination)(exportPos)
}
allExportInfos.filter(_.destination == ExportDestination.Normal)
.groupBy(_.jsName)
.filter { case (jsName, group) =>
if (jsName == "apply" && group.size == 2)
// @JSExportAll and single @JSExport("apply") should not be warned.
!unitAnnots.exists(_.symbol == JSExportAllAnnot)
else
group.size > 1
}
.foreach(_ => report.warning("Found duplicate @JSExport", sym))
/* Make sure that no field is exported *twice* as static, nor both as
* static and as top-level (it is possible to export a field several times
* as top-level, though).
*/
if (!sym.is(Method)) {
for (firstStatic <- allExportInfos.find(_.destination == ExportDestination.Static)) {
for (duplicate <- allExportInfos) {
duplicate.destination match {
case ExportDestination.Normal =>
// OK
case ExportDestination.Static =>
if (duplicate ne firstStatic) {
report.error(
"Fields (val or var) cannot be exported as static more than once",
duplicate.pos)
}
case _: ExportDestination.TopLevel =>
report.error(
"Fields (val or var) cannot be exported both as static and at the top-level",
duplicate.pos)
}
}
}
}
allExportInfos.distinct
}
/** Generates an exporter for a DefDef including default parameter methods. */
private def genExportDefs(defSym: Symbol, jsName: String, span: Span)(using Context): List[Tree] = {
val clsSym = defSym.owner.asClass
// Create symbol for new method
val name = makeExportName(jsName, !defSym.is(Method) || defSym.isJSProperty)
val flags = (defSym.flags | Method | Synthetic)
&~ (Deferred | Accessor | ParamAccessor | CaseAccessor | Mutable | Lazy | Override)
val info =
if (defSym.isConstructor) defSym.info
else if (defSym.is(Method)) finalResultTypeToAny(defSym.info)
else ExprType(defn.AnyType)
val expSym = newSymbol(clsSym, name, flags, info, defSym.privateWithin, span).entered
// Construct exporter DefDef tree
val exporter = genProxyDefDef(clsSym, defSym, expSym, span)
// Construct exporters for default getters
val defaultGetters = if (!defSym.hasDefaultParams) {
Nil
} else {
for {
(param, i) <- defSym.paramSymss.flatten.zipWithIndex
if param.is(HasDefault)
} yield {
genExportDefaultGetter(clsSym, defSym, expSym, i, span)
}
}
exporter :: defaultGetters
}
private def genExportDefaultGetter(clsSym: ClassSymbol, trgMethod: Symbol,
exporter: Symbol, paramPos: Int, span: Span)(using Context): Tree = {
// Get default getter method we'll copy
val trgGetterDenot = defaultGetterDenot(clsSym, trgMethod, paramPos)
assert(trgGetterDenot.exists, em"Cannot find default getter for param $paramPos of $trgMethod")
// Although the following must be true in a correct program, we cannot
// assert, since a graceful failure message is only generated later
if (!trgGetterDenot.isOverloaded) {
val trgGetter = trgGetterDenot.symbol
val expGetterName = DefaultGetterName(exporter.name.asTermName, paramPos)
val expGetter = newSymbol(clsSym, expGetterName, trgGetter.flags, trgGetter.info,
trgGetter.privateWithin, coord = span).entered
genProxyDefDef(clsSym, trgGetter, expGetter, span)
} else {
EmptyTree
}
}
private def defaultGetterDenot(targetSym: Symbol, sym: Symbol, paramIndex: Int)(using Context): Denotation =
targetSym.info.member(DefaultGetterName(sym.name.asTermName, paramIndex))
/** generate a DefDef tree (from [[proxySym]]) that calls [[trgSym]] */
private def genProxyDefDef(clsSym: ClassSymbol, trgSym: Symbol,
proxySym: TermSymbol, span: Span)(using Context): Tree = {
DefDef(proxySym, { argss =>
This(clsSym).select(trgSym).appliedToArgss(argss)
}).withSpan(span)
}
/** Changes the final result type of a type `tpe` to Any. */
private def finalResultTypeToAny(tpe: Type)(using Context): Type = tpe match {
case tpe: MethodType =>
MethodType(tpe.paramNames, tpe.paramInfos, finalResultTypeToAny(tpe.resultType))
case _: ExprType =>
ExprType(defn.AnyType)
case tpe: PolyType =>
PolyType(tpe.paramNames)(
x => tpe.paramInfos.mapConserve(_.subst(tpe, x).bounds),
x => finalResultTypeToAny(tpe.resultType.subst(tpe, x)))
case _ =>
defn.AnyType
}
/** Whether the given symbol has a visibility that allows exporting */
private def hasLegalExportVisibility(sym: Symbol)(using Context): Boolean =
sym.isPublic || sym.is(Protected, butNot = Local)
/** Checks whether this type has a repeated parameter elsewhere than at the end of all the params. */
private def hasIllegalRepeatedParam(sym: Symbol)(using Context): Boolean = {
val paramInfos = sym.info.paramInfoss.flatten
paramInfos.nonEmpty && paramInfos.init.exists(_.isRepeatedParam)
}
/** Checks whether there are default parameters not at the end of the flattened parameter list. */
private def hasIllegalDefaultParam(sym: Symbol)(using Context): Boolean = {
sym.hasDefaultParams
&& sym.paramSymss.flatten.reverse.dropWhile(_.is(HasDefault)).exists(_.is(HasDefault))
}
}
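// A minimal user-level illustration (hypothetical Scala.js source, not part of this
// compiler phase) of the export shapes validated and rewritten above:
//
//   import scala.scalajs.js.annotation._
//
//   object Api {
//     @JSExportTopLevel("greet")   // top-level export; the name must be a valid JS identifier
//     def greet(name: String): String = s"Hello, $name"
//   }
//
//   class Counter(start: Int) {
//     @JSExport("current")         // normal member export under an explicit name
//     def value: Int = start
//   }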
|
lampepfl/dotty
|
compiler/src/dotty/tools/dotc/transform/sjs/PrepJSExports.scala
|
Scala
|
apache-2.0
| 18,190 |
/*
* Copyright (C) 2017 LREN CHUV for Human Brain Project
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package ch.chuv.lren.woken
/**
* Adapters (in the sense of [[http://alistair.cockburn.us/Hexagonal+architecture Hexagonal architecture]])
* that dispatch the queries either to core actors or to remote Woken instances.
*/
package object dispatch {}
|
HBPSP8Repo/workflow
|
src/main/scala/ch/chuv/lren/woken/dispatch/package.scala
|
Scala
|
apache-2.0
| 990 |
package com.soteradefense.datawake.trails.bolts
import java.util
import java.util.Properties
import backtype.storm.task.TopologyContext
import backtype.storm.topology.OutputFieldsDeclarer
import backtype.storm.topology.base.BaseBasicBolt
import kafka.producer.{Producer, ProducerConfig}
import org.slf4j.{Logger, LoggerFactory}
/**
* Abstract class that creates a kafka producer
* @param brokers Comma-separated list of kafka brokers
* @param topic The topic to write to.
*/
abstract class HighLevelKafkaProducer(brokers: String, topic: String) extends BaseBasicBolt {
var kafkaProducer: Producer[String, String] = null
var logger: Logger = null
var kafkaBrokers: String = null
override def prepare(stormConf: util.Map[_, _], context: TopologyContext): Unit = {
super.prepare(stormConf, context)
kafkaBrokers = brokers
kafkaProducer = new Producer[String, String](createProducerConfig)
logger = LoggerFactory.getLogger(this.getClass)
}
override def declareOutputFields(declarer: OutputFieldsDeclarer): Unit = {
//Do Nothing
}
/**
* Creates the producer configuration for connecting to kafka.
* @return ProducerConfig object
*/
def createProducerConfig: ProducerConfig = {
val props = new Properties()
props.put("metadata.broker.list", kafkaBrokers)
props.put("serializer.class", "kafka.serializer.StringEncoder")
props.put("request.required.acks", "1")
new ProducerConfig(props)
}
override def cleanup(): Unit = {
super.cleanup()
if (kafkaProducer != null)
kafkaProducer.close()
}
}
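// A minimal sketch (hypothetical subclass, not part of the original source): a bolt
// that forwards the first field of every tuple to the configured Kafka topic using
// the producer prepared by HighLevelKafkaProducer.
class ForwardingKafkaBolt(brokers: String, topic: String)
  extends HighLevelKafkaProducer(brokers, topic) {
  override def execute(input: backtype.storm.tuple.Tuple,
                       collector: backtype.storm.topology.BasicOutputCollector): Unit = {
    kafkaProducer.send(new kafka.producer.KeyedMessage[String, String](topic, input.getString(0)))
  }
}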
|
Sotera/datawake-prefetch
|
trail-specific-search/src/main/scala/com/soteradefense/datawake/trails/bolts/HighLevelKafkaProducer.scala
|
Scala
|
apache-2.0
| 1,584 |
package leo.datastructures
/**
* Created by lex on 16.10.14.
*/
abstract class Position(protected val seq: Seq[Int]) extends Pretty {
import leo.datastructures.Position.DerivedPos
def posHead: Int = seq.head
def tail: Position = DerivedPos(seq.tail)
def abstrPos: Position = new DerivedPos(seq :+ 1)
def headPos: Position = new DerivedPos(seq :+ 0)
def argPos(i: Int): Position = new DerivedPos(seq :+ i)
def prependAbstrPos: Position = new DerivedPos(1 +: seq)
def prependHeadPos: Position = new DerivedPos(0 +: seq)
def preprendArgPos(i: Int): Position = new DerivedPos(i +: seq)
def pretty = if (seq.isEmpty)
"ε"
else
seq.mkString(",")
}
object Position {
def root: Position = RootPos
protected case class DerivedPos(pos: Seq[Int]) extends Position(pos)
protected case object RootPos extends Position(Seq.empty)
object AbstrPos {
def unapply(pos: Position): Boolean = {
pos.seq.nonEmpty && pos.seq.head == 1
}
}
object HeadPos {
def unapply(pos: Position): Boolean = {
pos.seq.nonEmpty && pos.seq.head == 0
}
}
object ArgsPos {
def unapply(pos: Position): Boolean = {
pos.seq.nonEmpty
}
}
}
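// A minimal usage sketch (not part of the original file): descending into the first
// argument position of a term and then into its head position, then pretty-printing.
//   val p = Position.root.argPos(1).headPos
//   p.pretty             // "1,0"
//   Position.root.pretty // "ε"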
|
cbenzmueller/LeoPARD
|
src/main/scala/leo/datastructures/Position.scala
|
Scala
|
bsd-3-clause
| 1,235 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.logical
import java.util
import org.apache.calcite.plan.RelOptRule.{none, operand}
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall}
import org.apache.calcite.rex.RexProgram
import org.apache.flink.table.api.TableException
import org.apache.flink.table.catalog.{CatalogManager, FunctionCatalog, GenericInMemoryCatalog}
import org.apache.flink.table.expressions.{Expression, PlannerExpression}
import org.apache.flink.table.plan.nodes.logical.{FlinkLogicalCalc, FlinkLogicalTableSourceScan}
import org.apache.flink.table.plan.util.RexProgramExtractor
import org.apache.flink.table.sources.FilterableTableSource
import org.apache.flink.util.Preconditions
import scala.collection.JavaConverters._
class PushFilterIntoTableSourceScanRule extends RelOptRule(
operand(classOf[FlinkLogicalCalc],
operand(classOf[FlinkLogicalTableSourceScan], none)),
"PushFilterIntoTableSourceScanRule") {
private val defaultCatalog = "default_catalog"
private val catalogManager = new CatalogManager(
defaultCatalog, new GenericInMemoryCatalog(defaultCatalog, "default_database"))
override def matches(call: RelOptRuleCall): Boolean = {
val calc: FlinkLogicalCalc = call.rel(0).asInstanceOf[FlinkLogicalCalc]
val scan: FlinkLogicalTableSourceScan = call.rel(1).asInstanceOf[FlinkLogicalTableSourceScan]
scan.tableSource match {
case source: FilterableTableSource[_] =>
calc.getProgram.getCondition != null && !source.isFilterPushedDown
case _ => false
}
}
override def onMatch(call: RelOptRuleCall): Unit = {
val calc: FlinkLogicalCalc = call.rel(0).asInstanceOf[FlinkLogicalCalc]
val scan: FlinkLogicalTableSourceScan = call.rel(1).asInstanceOf[FlinkLogicalTableSourceScan]
val filterableSource = scan.tableSource.asInstanceOf[FilterableTableSource[_]]
pushFilterIntoScan(call, calc, scan, filterableSource, description)
}
private def pushFilterIntoScan(
call: RelOptRuleCall,
calc: FlinkLogicalCalc,
scan: FlinkLogicalTableSourceScan,
filterableSource: FilterableTableSource[_],
description: String): Unit = {
Preconditions.checkArgument(!filterableSource.isFilterPushedDown)
val program = calc.getProgram
val (predicates, unconvertedRexNodes) =
RexProgramExtractor.extractConjunctiveConditions(
program,
call.builder().getRexBuilder,
new FunctionCatalog(catalogManager))
if (predicates.isEmpty) {
// no condition can be translated to expression
return
}
val remainingPredicates = new util.LinkedList[Expression]()
predicates.foreach(e => remainingPredicates.add(e))
val newTableSource = filterableSource.applyPredicate(remainingPredicates)
if (newTableSource.asInstanceOf[FilterableTableSource[_]].isFilterPushedDown
&& newTableSource.explainSource().equals(scan.tableSource.explainSource())) {
throw new TableException("Failed to push filter into table source! "
+ "table source with pushdown capability must override and change "
+ "explainSource() API to explain the pushdown applied!")
}
    // check whether the framework still needs to apply a filter
val relBuilder = call.builder()
val remainingCondition = {
if (!remainingPredicates.isEmpty || unconvertedRexNodes.nonEmpty) {
relBuilder.push(scan)
// TODO we cast to planner expressions as a temporary solution to keep the old interfaces
        val remainingPredicatesAsExpr = remainingPredicates
          .asScala
          .map(_.asInstanceOf[PlannerExpression])
        val remainingConditions = (remainingPredicatesAsExpr.map(_.toRexNode(relBuilder))
++ unconvertedRexNodes)
remainingConditions.reduce((l, r) => relBuilder.and(l, r))
} else {
null
}
}
    // check whether we still need a RexProgram. A RexProgram is needed when either
    // a projection or a filter exists.
val newScan = scan.copy(scan.getTraitSet, newTableSource, scan.selectedFields)
val newRexProgram = {
if (remainingCondition != null || !program.projectsOnlyIdentity) {
val expandedProjectList = program.getProjectList.asScala
.map(ref => program.expandLocalRef(ref)).asJava
RexProgram.create(
program.getInputRowType,
expandedProjectList,
remainingCondition,
program.getOutputRowType,
relBuilder.getRexBuilder)
} else {
null
}
}
if (newRexProgram != null) {
val newCalc = calc.copy(calc.getTraitSet, newScan, newRexProgram)
call.transformTo(newCalc)
} else {
call.transformTo(newScan)
}
}
}
object PushFilterIntoTableSourceScanRule {
val INSTANCE: RelOptRule = new PushFilterIntoTableSourceScanRule
}
|
mbode/flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/rules/logical/PushFilterIntoTableSourceScanRule.scala
|
Scala
|
apache-2.0
| 5,631 |
package run.local
import org.apache.spark.sql.SparkSession
import run.shared.GraphXExample
object GraphXLocalExample {
def main(args: Array[String]): Unit = {
    val ss = SparkSession.builder().appName("GraphX Local Example").master("local[*]").getOrCreate()
GraphXExample.runGraphXExample(ss)
}
}
|
Erwangf/wikipedia-mining
|
src/main/scala/run/local/GraphXLocalExample.scala
|
Scala
|
mit
| 312 |
package org.scalafmt.util
import scala.annotation.tailrec
import scala.collection.mutable
import scala.meta.Lit
import scala.meta.Name
import scala.meta.Term
import scala.meta.Tree
import scala.meta.tokens.Token
import scala.meta.tokens.Token.Comment
import scala.meta.tokens.Token.LeftParen
import scala.meta.tokens.Token.RightParen
import org.scalafmt.config.BinPack
import org.scalafmt.config.Config
import org.scalafmt.config.FilterMatcher
import org.scalafmt.config.ScalafmtConfig
import org.scalafmt.internal.FormatToken
import org.scalafmt.internal.FormatTokens
import org.scalameta.FileLine
import org.scalameta.logger
class StyleMap(
tokens: FormatTokens,
val init: ScalafmtConfig
) {
import StyleMap._
val literalR: FilterMatcher = init.binPack.literalsRegex
private val prefix = "\\s*scalafmt: ".r
val forcedBinPack: mutable.Set[Tree] = mutable.Set.empty
private val (
starts: Array[Int],
styles: Array[ScalafmtConfig]
) = {
var curr = init
val startBuilder = Array.newBuilder[Int]
val styleBuilder = Array.newBuilder[ScalafmtConfig]
startBuilder += 0
styleBuilder += init
val disableBinPack = mutable.Map.empty[Token, BinPack.Unsafe]
def warn(err: String)(implicit fileLine: FileLine): Unit = logger.elem(err)
tokens.arr.foreach { tok =>
def changeStyle(style: ScalafmtConfig): Option[ScalafmtConfig] = {
val changing = curr != style
if (!changing) None
else {
startBuilder += tok.left.start
styleBuilder += style
val prev = curr
curr = style
Some(prev)
}
}
tok.left match {
case Comment(c) if prefix.findFirstIn(c).isDefined =>
val configured = Config.fromHoconString(c, init, Some("scalafmt"))
// TODO(olafur) report error via callback
configured.foreach(logger.elem(_)) { style =>
init.rewrite.rulesChanged(style.rewrite).foreach { x =>
warn(x.mkString("May not override rewrite settings: ", ",", ""))
}
changeStyle(style)
}
case open @ LeftParen()
if curr.binPack.literalArgumentLists &&
opensLiteralArgumentList(tok)(curr) =>
forcedBinPack += tok.meta.leftOwner
changeStyle(setBinPack(curr, callSite = BinPack.Unsafe.Always))
.foreach { x =>
val unsafe = x.binPack.unsafeCallSite
tokens.matchingOpt(open).foreach(disableBinPack.update(_, unsafe))
}
case close @ RightParen() =>
disableBinPack.remove(close).foreach { x =>
changeStyle(setBinPack(curr, callSite = x))
}
case _ =>
}
}
(startBuilder.result(), styleBuilder.result())
}
@tailrec
private def isBasicLiteral(
tree: Tree
)(implicit style: ScalafmtConfig): Boolean =
tree match {
case lit: Lit =>
val strName = tree match {
case t: Lit.Int
if 0 <= t.value && t.value < Byte.MaxValue &&
lit.tokens.head.toString.startsWith("0x") =>
"Byte"
case _: Lit.Null => "Null"
case _ => lit.value.getClass.getName
}
literalR.matches(strName)
case x: Name => literalR.matches(x.productPrefix)
case _ if !style.binPack.literalsIncludeSimpleExpr => false
case t: Term.Select => isBasicLiteral(t.qual)
case t: Term.Assign => isBasicLiteral(t.rhs)
case _ =>
tree.children match {
case Nil => true
case one :: Nil => isBasicLiteral(one)
case _ => false
}
}
@tailrec
private def isLiteral(tree: Tree)(implicit style: ScalafmtConfig): Boolean =
isBasicLiteral(tree) ||
style.binPack.literalsIncludeSimpleExpr && (tree match {
case t: Term.Assign => isLiteral(t.rhs)
case t: Term.Apply =>
isBasicLiteral(t.fun) && (t.args match {
case Nil => true
case arg :: Nil => isLiteral(arg)
case _ => false
})
case Term.New(t) =>
isBasicLiteral(t.name) && (t.argss match {
case Nil => true
case (arg :: Nil) :: Nil => isLiteral(arg)
case _ => false
})
case _ =>
tree.children match {
case Nil => true
case one :: Nil => isLiteral(one)
case _ => false
}
})
def opensLiteralArgumentList(
ft: FormatToken
)(implicit style: ScalafmtConfig): Boolean =
ft.meta.leftOwner match {
case TreeOps.SplitCallIntoParts(_, eitherArgs) =>
eitherArgs
.fold(Some(_), TokenOps.findArgsFor(ft.left, _, tokens.matchingOpt))
.exists { args =>
args.lengthCompare(style.binPack.literalsMinArgCount) >= 0 &&
args.forall(isLiteral)
}
case _ => false
}
@inline
def at(token: FormatToken): ScalafmtConfig =
at(token.left)
@inline
def forall(f: ScalafmtConfig => Boolean): Boolean = styles.forall(f)
def at(token: Token): ScalafmtConfig = {
// since init is at pos 0, idx cannot be -1
val idx = java.util.Arrays.binarySearch(starts, token.start)
if (idx >= 0) styles(idx) else styles(-idx - 2)
}
private[util] def numEntries: Int = styles.length
}
object StyleMap {
def setBinPack(
curr: ScalafmtConfig,
callSite: BinPack.Unsafe
): ScalafmtConfig =
if (curr.binPack.unsafeCallSite == callSite) curr
else curr.copy(binPack = curr.binPack.copy(unsafeCallSite = callSite))
}
|
scalameta/scalafmt
|
scalafmt-core/shared/src/main/scala/org/scalafmt/util/StyleMap.scala
|
Scala
|
apache-2.0
| 5,592 |
package io.github.tailhq.dynaml.utils.sumac
import scala.util.parsing.combinator.RegexParsers
/**
 * The base combinator parser definitions for parsing a list of items, where an item may be quoted
 * (and may then contain the list separator) or left unquoted.
* User: andrews
* Date: 3/28/14
*/
trait BaseCombinatorParser[T] extends RegexParsers {
/**
* extra characters to forbid in unquoted tokens
*/
protected def extraForbiddenChars: String
/**
* the separator in the list
*/
protected val separator: String = ","
/**
* what makes an item in the list
*/
protected val item: Parser[T]
  //needs to be lazy as we are waiting for extraForbiddenChars to exist
  //either it's not quoted: it may contain at most one single or double quote (simplification)
private lazy val noQuote = s"""[^"'$extraForbiddenChars$separator]*["']?[^"'$extraForbiddenChars$separator]*""".r
//or it's quoted with single or double quotes and anything goes except the quote
private lazy val quoted = "\\"" ~> """[^"]+""".r <~ "\\"" | "'" ~> "[^']+".r <~ "'"
/**
* a token is either anything without a separator in it, or a quoted string that might contain the separator
*/
protected lazy val token: Parser[String] = quoted | noQuote
/**
* a list of items
*/
protected val list: Parser[Seq[T]] = rep1(item, separator ~> item)
/**
* parse the list of items
* @param in
* @return
*/
def parse(in: String): Seq[T] = parseAll(list, in) match {
case Success(result, _) => result
case failure: NoSuccess =>
throw new IllegalArgumentException(s"'$in' cannot be parsed. Caused by: ${failure.msg}}")
}
}
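/**
 * Minimal usage sketch, added for illustration only (not part of the original file):
 * a concrete parser built on the trait above for comma-separated string lists, where
 * items may be single- or double-quoted to include the separator. The object name is
 * an assumption.
 */
object SimpleStringListParser extends BaseCombinatorParser[String] {
  override protected def extraForbiddenChars: String = ""
  override protected val item: Parser[String] = token
}
// e.g. SimpleStringListParser.parse("""a,"b,c",'d'""") is expected to yield Seq("a", "b,c", "d")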
|
mandar2812/DynaML
|
dynaml-core/src/main/scala/io/github/tailhq/dynaml/utils/sumac/BaseCombinatorParser.scala
|
Scala
|
apache-2.0
| 1,650 |
package janstenpickle.vault.manage
import janstenpickle.vault.core.VaultSpec
import janstenpickle.vault.manage.Model.Rule
import org.scalacheck.{Gen, Prop}
import org.specs2.ScalaCheck
import uscala.result.Result
class PolicyIT extends VaultSpec with ScalaCheck {
import PolicyIT._
import VaultSpec._
override def is =
s2"""
Can successfully set and get policies $happy
Cannot set an invalid policy $sad
"""
lazy val underTest = Policy(config)
def happy = Prop.forAllNoShrink(
longerStrGen,
Gen.listOf(ruleGen(longerStrGen, policyGen, capabilitiesGen)).
suchThat(_.nonEmpty)) { (name, rules) =>
(underTest.set(name.toLowerCase, rules)
.attemptRun(_.getMessage()) must beOk) and
(underTest.inspect(name.toLowerCase)
.attemptRun(_.getMessage()) must beOk) and
(underTest.delete(name.toLowerCase).attemptRun(_.getMessage()) must beOk)
}
// cannot use generated values here as
// vault seems to have a failure rate limit
def sad = underTest.set(
"nic", List(Rule("cage", Some(List("kim", "copolla"))))
).attemptRun(_.getMessage()) must beFail
}
object PolicyIT {
val policyGen = Gen.option(Gen.oneOf("read", "write", "sudo", "deny"))
val capabilitiesGen =
Gen.listOf(Gen.oneOf(
"create", "read", "update", "delete", "list", "sudo", "deny")).
suchThat(_.nonEmpty).
map(_.distinct)
def ruleGen(
pathGen: Gen[String],
polGen: Gen[Option[String]],
capGen: Gen[List[String]]
) = for {
path <- pathGen
policy <- polGen
capabilities <- capGen
} yield Rule(path, Some(capabilities), policy)
}
|
janstenpickle/scala-vault
|
manage/src/it/scala/janstenpickle/vault/manage/PolicyIT.scala
|
Scala
|
mit
| 1,632 |
package com.scalableQuality.quick.core.phases
import org.scalatest.prop.{
GeneratorDrivenPropertyChecks,
TableDrivenPropertyChecks
}
import org.scalatest.{FlatSpec, Matchers}
class ShouldUseDuringTest
extends FlatSpec
with Matchers
with GeneratorDrivenPropertyChecks
with TableDrivenPropertyChecks {
"com.scalableQuality.quick.core.phases.ShouldUseDuringTest.apply(MetaData)" should "default to false if attribute is missing" in {
val columnDescription = <ColumnDescription />
val shouldUseDuringEither = ShouldUseDuring(columnDescription.attributes)
shouldUseDuringEither match {
case Left(_) => fail
case Right(shouldUseDuring) =>
(shouldUseDuring(ValidationStage) || shouldUseDuring(MatchingStage) || shouldUseDuring(
ReportingStage)) shouldBe false
}
}
it should "should handle upper case boolean values" in forAll {
(useDuringValidation: Boolean,
useDuringMatching: Boolean,
useDuringReporting: Boolean) =>
val columnDescription = <ColumnDescription
useDuringValidation={useDuringValidation.toString.toUpperCase}
useDuringMatching={useDuringMatching.toString.toUpperCase}
useDuringReporting={useDuringReporting.toString.toUpperCase} />
val shouldUseDuringEither = ShouldUseDuring(columnDescription.attributes)
shouldUseDuringEither match {
case Left(_) => fail
case Right(shouldUseDuring) =>
assert(
shouldUseDuring(ValidationStage) === useDuringValidation &&
shouldUseDuring(MatchingStage) === useDuringMatching &&
shouldUseDuring(ReportingStage) === useDuringReporting
)
}
}
val invalidBooleans = Table(
("useDuringValidation", "useDuringMatching", "useDuringMatching"),
("blabla", "true", "true"),
("true", "blabla", "true"),
("true", "true", "blabla")
)
it should "return Left[ErrorMessage, ShouldUseDuring] if any of the attribute have an invald boolean" in
forAll(invalidBooleans) {
(useDuringValidation: String,
useDuringMatching: String,
useDuringReporting: String) =>
val columnDescription = <ColumnDescription
useDuringValidation={useDuringValidation}
useDuringMatching={useDuringMatching}
useDuringReporting={useDuringReporting} />
val shouldUseDuringEither =
ShouldUseDuring(columnDescription.attributes)
shouldUseDuringEither shouldBe a[Left[_, _]]
}
"com.scalableQuality.quick.core.phases.ShouldUseDuringTest.apply(ColumnUsageStages*)" should "map its boolean input to the appropriate ColumnUsageStage" in forAll {
(useDuringValidation: Boolean,
useDuringMatching: Boolean,
useDuringReporting: Boolean) =>
val columnDescription = <ColumnDescription
useDuringValidation={useDuringValidation.toString}
useDuringMatching={useDuringMatching.toString}
useDuringReporting={useDuringReporting.toString} />
val shouldUseDuringEither = ShouldUseDuring(columnDescription.attributes)
shouldUseDuringEither match {
case Left(_) => fail
case Right(shouldUseDuring) =>
assert(
shouldUseDuring(ValidationStage) === useDuringValidation &&
shouldUseDuring(MatchingStage) === useDuringMatching &&
shouldUseDuring(ReportingStage) === useDuringReporting
)
}
}
it should "return true if the column is used in at least one of the ColumnUsageStages passed as arguments" in forAll {
(useDuringValidation: Boolean,
useDuringMatching: Boolean,
useDuringReporting: Boolean) =>
val columnDescription = <ColumnDescription
useDuringValidation={useDuringValidation.toString}
useDuringMatching={useDuringMatching.toString}
useDuringReporting={useDuringReporting.toString} />
val shouldUseDuringEither = ShouldUseDuring(columnDescription.attributes)
shouldUseDuringEither match {
case Left(_) => fail
case Right(shouldUseDuring) =>
shouldUseDuring(ValidationStage, MatchingStage, ReportingStage) shouldBe (useDuringValidation || useDuringMatching || useDuringReporting)
}
}
}
|
MouslihAbdelhakim/Quick
|
src/test/scala/com/scalableQuality/quick/core/phases/ShouldUseDuringTest.scala
|
Scala
|
apache-2.0
| 4,228 |
package lila.practice
import scala.concurrent.duration._
import reactivemongo.api.ReadPreference
import lila.common.Bus
import lila.db.dsl._
import lila.memo.CacheApi._
import lila.study.{ Chapter, Study }
import lila.user.User
final class PracticeApi(
coll: Coll,
configStore: lila.memo.ConfigStore[PracticeConfig],
cacheApi: lila.memo.CacheApi,
studyApi: lila.study.StudyApi
)(implicit ec: scala.concurrent.ExecutionContext) {
import BSONHandlers._
def get(user: Option[User]): Fu[UserPractice] =
for {
struct <- structure.get
prog <- user.fold(fuccess(PracticeProgress.anon))(progress.get)
} yield UserPractice(struct, prog)
def getStudyWithFirstOngoingChapter(user: Option[User], studyId: Study.Id): Fu[Option[UserStudy]] =
for {
up <- get(user)
chapters <- studyApi.chapterMetadatas(studyId)
chapter = up.progress firstOngoingIn chapters
studyOption <- chapter.fold(studyApi byIdWithFirstChapter studyId) { chapter =>
studyApi.byIdWithChapter(studyId, chapter.id)
}
} yield makeUserStudy(studyOption, up, chapters)
def getStudyWithChapter(
user: Option[User],
studyId: Study.Id,
chapterId: Chapter.Id
): Fu[Option[UserStudy]] =
for {
up <- get(user)
chapters <- studyApi.chapterMetadatas(studyId)
studyOption <- studyApi.byIdWithChapter(studyId, chapterId)
} yield makeUserStudy(studyOption, up, chapters)
private def makeUserStudy(
studyOption: Option[Study.WithChapter],
up: UserPractice,
chapters: List[Chapter.Metadata]
) =
for {
rawSc <- studyOption
sc = rawSc.copy(
study = rawSc.study.rewindTo(rawSc.chapter).withoutMembers,
chapter = rawSc.chapter.withoutChildrenIfPractice
)
practiceStudy <- up.structure study sc.study.id
section <- up.structure findSection sc.study.id
publishedChapters = chapters.filterNot { c =>
PracticeStructure isChapterNameCommented c.name
}
if publishedChapters.exists(_.id == sc.chapter.id)
} yield UserStudy(up, practiceStudy, publishedChapters, sc, section)
object config {
def get = configStore.get dmap (_ | PracticeConfig.empty)
def set = configStore.set _
def form = configStore.makeForm
}
object structure {
private val cache = cacheApi.unit[PracticeStructure] {
_.expireAfterAccess(3.hours)
.buildAsyncFuture { _ =>
for {
conf <- config.get
chapters <- studyApi.chapterIdNames(conf.studyIds)
} yield PracticeStructure.make(conf, chapters)
}
}
def get = cache.getUnit
def clear() = cache.invalidateUnit()
def onSave(study: Study) =
get foreach { structure =>
if (structure.hasStudy(study.id)) clear()
}
}
object progress {
import PracticeProgress.NbMoves
def get(user: User): Fu[PracticeProgress] =
coll.one[PracticeProgress]($id(user.id)) dmap {
_ | PracticeProgress.empty(PracticeProgress.Id(user.id))
}
private def save(p: PracticeProgress): Funit =
coll.update.one($id(p.id), p, upsert = true).void
def setNbMoves(user: User, chapterId: Chapter.Id, score: NbMoves): Funit = {
get(user) flatMap { prog =>
save(prog.withNbMoves(chapterId, score))
}
} >>- studyApi.studyIdOf(chapterId).foreach {
_ ?? { studyId =>
Bus.publish(PracticeProgress.OnComplete(user.id, studyId, chapterId), "finishPractice")
}
}
def reset(user: User) =
coll.delete.one($id(user.id)).void
def completionPercent(userIds: List[User.ID]): Fu[Map[User.ID, Int]] =
coll
.aggregateList(
maxDocs = Int.MaxValue,
readPreference = ReadPreference.secondaryPreferred
) { framework =>
import framework._
Match($doc("_id" $in userIds)) -> List(
Project(
$doc(
"nb" -> $doc(
"$size" -> $doc(
"$objectToArray" -> "$chapters"
)
)
)
)
)
}
.map {
_.view.flatMap { obj =>
import cats.implicits._
(obj.string("_id"), obj.int("nb")) mapN { (k, v) =>
k -> (v * 100f / PracticeStructure.totalChapters).toInt
}
}.toMap
}
}
}
|
luanlv/lila
|
modules/practice/src/main/PracticeApi.scala
|
Scala
|
mit
| 4,458 |
package application
import java.time.ZonedDateTime
import javax.inject.Named
import adapter.BitFlyer
import adapter.aws.{MailContent, SES, OrderQueueBody, SQS}
import adapter.bitflyer.PubNubReceiver
import akka.actor.ActorRef
import com.google.gson.Gson
import com.google.inject.{Inject, Singleton}
import com.pubnub.api.PubNub
import com.pubnub.api.callbacks.SubscribeCallback
import com.pubnub.api.models.consumer.PNStatus
import com.pubnub.api.models.consumer.pubsub.{PNPresenceEventResult, PNMessageResult}
import domain.{ProductCode, models}
import domain.models.{Ticker, Orders}
import domain.strategy.{Strategies, Strategy}
import domain.time.DateUtil
import play.api.{Configuration, Logger}
import service.DataLoader
import scala.concurrent.Future
@Singleton
class RealTimeReceiver @Inject()(
    config: Configuration,
    @Named("candle") candleActor: ActorRef,
    strategySettingApplication: StrategySettingApplication,
    sqs: SQS) {
Logger.info("init RealTimeReceiver")
def start(): Unit = {
val gson: Gson = new Gson()
val productCode = s"lightning_ticker_${ProductCode.btcFx}"
val key = "sub-c-52a9ab50-291b-11e5-baaa-0619f8945a4f"
val callback = new SubscribeCallback() {
override def message(pubnub: PubNub, message: PNMessageResult): Unit = {
val ticker: Ticker = gson.fromJson(message.getMessage, classOf[Ticker])
val recentTickerTime = ZonedDateTime.parse(ticker.timestamp).minusSeconds(1)
Strategies.values.filter(_.isAvailable) foreach (strategy => {
Future { // parallel for each strategy
strategy.synchronized { // run in order in the same strategy
if (recentTickerTime.isBefore(ZonedDateTime.parse(ticker.timestamp))) { // skip old ticker after retrying
(try {
strategy.judgeByTicker(ticker)
} catch {
case e: Exception =>
e.printStackTrace()
None
}).foreach(ordering => {
val now = DateUtil.now().toString
val order: models.Order = Orders.market(ordering)
Logger.info(
s"[order][${strategy.state.id}][${if (ordering.isEntry) "entry" else "close"}:${order.side}][${ticker.timestamp}] price:${ticker.ltp.toLong} size:${order.size}")
(try {
Some(retry(if (ordering.isEntry) 5 else 20, () => BitFlyer.orderByMarket(order, strategy.key, strategy.secret)))
} catch {
case _: Exception =>
// request error case
strategy.state.orderId = None
strategy.state.order = None
if (!ordering.isEntry) {
Logger.warn("!!!close request failed.!!!")
sendRequestFailureNoticeMail(strategy, ordering)
}
None
}).foreach(response => {
val newState = if (ordering.isEntry) {
// entry case
sqs.send(OrderQueueBody(strategy.email, strategy.state.id, response.child_order_acceptance_id, now))
strategy.state.copy(orderId = Some(response.child_order_acceptance_id), order = Some(ordering))
} else {
// close case
sqs.send(OrderQueueBody(strategy.email, strategy.state.id, response.child_order_acceptance_id, now, strategy.state.orderId))
strategy.state.copy(orderId = None, order = None)
}
strategySettingApplication.updateOrder(strategy.email, newState)
})
})
}
}
}(scala.concurrent.ExecutionContext.Implicits.global)
})
Strategies.putTicker(ticker)
}
override def presence(pubnub: PubNub, presence: PNPresenceEventResult): Unit = {
Logger.info("RealTimeReceiver#presence")
Logger.info(presence.toString)
}
override def status(pubnub: PubNub, status: PNStatus): Unit = {
Logger.info("RealTimeReceiver#status")
Logger.info(status.toString)
}
}
PubNubReceiver.start(productCode, key, callback)
Logger.info("PubNubReceiver started")
def loadInitialData() = {
Future {
val initialData: Seq[Ticker] = DataLoader.loadFromS3()
// val initialData: Seq[Ticker] = DataLoader.loadFromLocal()
initialData.foreach(ticker => {
Strategies.coreData.putTicker(ticker)
Strategies.values.foreach(_.putTicker(ticker))
})
Strategies.processEvery1minutes()
DataLoader.loaded = true
}(scala.concurrent.ExecutionContext.Implicits.global)
}
loadInitialData()
}
private def sendRequestFailureNoticeMail(strategy: Strategy, ordering: models.Ordering): Unit = {
val subject = "close request failed"
val text = s"failed: ${ordering.side} size:${ordering.size}"
SES.send(MailContent(strategy.email, "[email protected]", subject, text, text))
}
Future {
Thread.sleep(10 * 1000)
if (!domain.isBackTesting) {
start
}
}(scala.concurrent.ExecutionContext.Implicits.global)
}
|
rysh/scalatrader
|
scalatrader/app/application/RealTimeReceiver.scala
|
Scala
|
mit
| 5,300 |
package models
import scalikejdbc._
import skinny.orm.{Alias, SkinnyNoIdCRUDMapper}
import utils.MissionTime
case class GameProgress(
gameId: Long,
stationId: Long,
arrivalTime: Option[MissionTime],
station: Option[Station] = None
) {
def save()(implicit session: DBSession): Unit = GameProgress.save(this)
def update()(implicit session: DBSession): Unit = GameProgress.update(this)
}
object GameProgress extends SkinnyNoIdCRUDMapper[GameProgress] {
override val defaultAlias: Alias[GameProgress] = createAlias("gp")
override def extract(rs: WrappedResultSet, n: ResultName[GameProgress]): GameProgress = autoConstruct(rs, n, "station")
lazy val stationRef = belongsTo[Station](
right = Station,
merge = (gp, s) => gp.copy(station = s)
)
def save(gp: GameProgress)(implicit session: DBSession): Unit =
createWithAttributes(
'gameId -> gp.gameId,
'stationId -> gp.stationId,
'arrivalTime -> gp.arrivalTime.map(_.toString)
)
def update(gp: GameProgress)(implicit session: DBSession): Unit =
updateBy(sqls.eq(column.gameId, gp.gameId).and.eq(column.stationId, gp.stationId))
.withAttributes('arrivalTime -> gp.arrivalTime.map(_.toString))
}
|
ponkotuy/train-stamp-rally
|
app/models/GameProgress.scala
|
Scala
|
apache-2.0
| 1,222 |
package app.restlike.common
case class RefProvider(initial: Long) {
private var count = initial
def next = synchronized {
count += 1
s"$count"
}
}
|
alltonp/reprobate
|
src/main/scala/app/restlike/common/RefProvider.scala
|
Scala
|
apache-2.0
| 163 |
package model
import skinny.DBSettings
import skinny.test._
import org.scalatest.fixture.FlatSpec
import org.scalatest._
import scalikejdbc._
import scalikejdbc.scalatest._
import org.joda.time._
class MailBatchDetailSpec extends FlatSpec with Matchers with DBSettings with AutoRollback {
}
|
yoshitakes/skinny-task-example
|
src/test/scala/model/MailBatchDetailSpec.scala
|
Scala
|
mit
| 293 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.nio._
import kafka.api.ApiUtils._
import kafka.network.{RequestOrResponseSend, RequestChannel, InvalidRequestException}
import kafka.common.{TopicAndPartition, ErrorMapping}
import kafka.network.RequestChannel.Response
import kafka.utils.Logging
import collection.Set
object StopReplicaRequest extends Logging {
val CurrentVersion = 0.shortValue
val DefaultClientId = ""
val DefaultAckTimeout = 100
def readFrom(buffer: ByteBuffer): StopReplicaRequest = {
val versionId = buffer.getShort
val correlationId = buffer.getInt
val clientId = readShortString(buffer)
val controllerId = buffer.getInt
val controllerEpoch = buffer.getInt
val deletePartitions = buffer.get match {
case 1 => true
case 0 => false
case x =>
throw new InvalidRequestException("Invalid byte %d in delete partitions field. (Assuming false.)".format(x))
}
val topicPartitionPairCount = buffer.getInt
val topicPartitionPairSet = new collection.mutable.HashSet[TopicAndPartition]()
(1 to topicPartitionPairCount) foreach { _ =>
topicPartitionPairSet.add(TopicAndPartition(readShortString(buffer), buffer.getInt))
}
StopReplicaRequest(versionId, correlationId, clientId, controllerId, controllerEpoch,
deletePartitions, topicPartitionPairSet.toSet)
}
}
case class StopReplicaRequest(versionId: Short,
correlationId: Int,
clientId: String,
controllerId: Int,
controllerEpoch: Int,
deletePartitions: Boolean,
partitions: Set[TopicAndPartition])
extends RequestOrResponse(Some(RequestKeys.StopReplicaKey)) {
def this(deletePartitions: Boolean, partitions: Set[TopicAndPartition], controllerId: Int, controllerEpoch: Int, correlationId: Int) = {
this(StopReplicaRequest.CurrentVersion, correlationId, StopReplicaRequest.DefaultClientId,
controllerId, controllerEpoch, deletePartitions, partitions)
}
def writeTo(buffer: ByteBuffer) {
buffer.putShort(versionId)
buffer.putInt(correlationId)
writeShortString(buffer, clientId)
buffer.putInt(controllerId)
buffer.putInt(controllerEpoch)
buffer.put(if (deletePartitions) 1.toByte else 0.toByte)
buffer.putInt(partitions.size)
for (topicAndPartition <- partitions) {
writeShortString(buffer, topicAndPartition.topic)
buffer.putInt(topicAndPartition.partition)
}
}
def sizeInBytes(): Int = {
var size =
2 + /* versionId */
4 + /* correlation id */
ApiUtils.shortStringLength(clientId) +
4 + /* controller id*/
4 + /* controller epoch */
1 + /* deletePartitions */
4 /* partition count */
for (topicAndPartition <- partitions){
size += (ApiUtils.shortStringLength(topicAndPartition.topic)) +
4 /* partition id */
}
size
}
override def toString(): String = {
describe(true)
}
override def handleError(e: Throwable, requestChannel: RequestChannel, request: RequestChannel.Request): Unit = {
val responseMap = partitions.map {
case topicAndPartition => (topicAndPartition, ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]]))
}.toMap
val errorResponse = StopReplicaResponse(correlationId, responseMap)
requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, errorResponse)))
}
override def describe(details: Boolean): String = {
val stopReplicaRequest = new StringBuilder
stopReplicaRequest.append("Name: " + this.getClass.getSimpleName)
stopReplicaRequest.append("; Version: " + versionId)
stopReplicaRequest.append("; CorrelationId: " + correlationId)
stopReplicaRequest.append("; ClientId: " + clientId)
stopReplicaRequest.append("; DeletePartitions: " + deletePartitions)
stopReplicaRequest.append("; ControllerId: " + controllerId)
stopReplicaRequest.append("; ControllerEpoch: " + controllerEpoch)
if(details)
stopReplicaRequest.append("; Partitions: " + partitions.mkString(","))
stopReplicaRequest.toString()
}
}
|
usakey/kafka
|
core/src/main/scala/kafka/api/StopReplicaRequest.scala
|
Scala
|
apache-2.0
| 5,056 |
package scala
class Board {
  // why is [String] placed after the method name and not after the class name?
  // does this have something to do with generics?
// var state = Array.fill[String]( 3, 3 )( " " )
var state = Array.ofDim[String]( 3, 3 )
for ( i <- 0 to 2 ) {
for ( j <- 0 to 2 ) {
      // is there actually any reason to use such odd notation?
state( j )( i ) = ( i + 1 + j * 3 ).toString;
      // could this be a preference for columns over rows?
}
}
  // what did I actually do here? what does _ mean?
def display() = println( state.map( _.mkString( " " ) ).mkString( "\\n" ) )
def checkCross() {
checkPattern( "x" )
}
  def checkCircle() {
checkPattern( "o" )
}
def placeCross( l: Int ) {
placeMark( l, "x" )
}
def placeCircle( l: Int ) {
placeMark( l, "o" )
}
def placeMark( loc : Int, m: String ) {
val row = ( loc - 1 ) / 3
val col = ( loc - 1 ) % 3
state( row )( col ) = m;
}
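  // Worked example (added note): placeMark( 5, "x" ) computes row = (5-1)/3 = 1 and
  // col = (5-1)%3 = 1, i.e. it marks the centre cell of the 3x3 board.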
def checkPattern( p: String ) : Boolean = {
true
}
}
|
jstolarek/sandbox
|
scala/Board.scala
|
Scala
|
unlicense
| 1,028 |
package models
import spray.json.DefaultJsonProtocol
import spray.httpx.unmarshalling._
import spray.httpx.marshalling._
case class Tariff(pence: Int, desc: String)
object TariffJsonProtocol extends DefaultJsonProtocol {
implicit val TariffFormat = jsonFormat2(Tariff)
}
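/**
 * Illustrative round trip, added as a sketch (not part of the original repo); it assumes
 * only spray-json, which this file already imports, and the TariffJsonProtocol above.
 */
object TariffJsonExample {
  import TariffJsonProtocol._
  import spray.json._
  // e.g. {"pence":350,"desc":"standard rate"}
  val json: String = Tariff(350, "standard rate").toJson.compactPrint
  val parsed: Tariff = json.parseJson.convertTo[Tariff]
}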
|
opyate/taximeter
|
src/main/scala/models/Tariff.scala
|
Scala
|
mit
| 275 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io.{Externalizable, ObjectInput, ObjectOutput}
import org.roaringbitmap.RoaringBitmap
import org.apache.spark.storage.BlockManagerId
import org.apache.spark.util.Utils
/**
* Result returned by a ShuffleMapTask to a scheduler. Includes the block manager address that the
* task ran on as well as the sizes of outputs for each reducer, for passing on to the reduce tasks.
*/
private[spark] sealed trait MapStatus {
/** Location where this task was run. */
def location: BlockManagerId
/**
* Estimated size for the reduce block, in bytes.
*
* If a block is non-empty, then this method MUST return a non-zero size. This invariant is
* necessary for correctness, since block fetchers are allowed to skip zero-size blocks.
*/
def getSizeForBlock(reduceId: Int): Long
}
private[spark] object MapStatus {
def apply(loc: BlockManagerId, uncompressedSizes: Array[Long]): MapStatus = {
if (uncompressedSizes.length > 2000) {
HighlyCompressedMapStatus(loc, uncompressedSizes)
} else {
new CompressedMapStatus(loc, uncompressedSizes)
}
}
private[this] val LOG_BASE = 1.1
/**
* Compress a size in bytes to 8 bits for efficient reporting of map output sizes.
* We do this by encoding the log base 1.1 of the size as an integer, which can support
* sizes up to 35 GB with at most 10% error.
*/
def compressSize(size: Long): Byte = {
if (size == 0) {
0
} else if (size <= 1L) {
1
} else {
math.min(255, math.ceil(math.log(size) / math.log(LOG_BASE)).toInt).toByte
}
}
/**
* Decompress an 8-bit encoded block size, using the reverse operation of compressSize.
*/
def decompressSize(compressedSize: Byte): Long = {
if (compressedSize == 0) {
0
} else {
math.pow(LOG_BASE, compressedSize & 0xFF).toLong
}
}
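  // Illustrative round trip (added comment; numbers are approximate): a 1 MiB block
  // (1048576 bytes) encodes as ceil(log(1048576) / log(1.1)) = 146, and decoding 146
  // gives 1.1^146 ≈ 1.10e6 bytes, i.e. within the ~10% error bound described above.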
}
/**
* A [[MapStatus]] implementation that tracks the size of each block. Size for each block is
* represented using a single byte.
*
* @param loc location where the task is being executed.
* @param compressedSizes size of the blocks, indexed by reduce partition id.
*/
private[spark] class CompressedMapStatus(
private[this] var loc: BlockManagerId,
private[this] var compressedSizes: Array[Byte])
extends MapStatus with Externalizable {
protected def this() = this(null, null.asInstanceOf[Array[Byte]]) // For deserialization only
def this(loc: BlockManagerId, uncompressedSizes: Array[Long]) {
this(loc, uncompressedSizes.map(MapStatus.compressSize))
}
override def location: BlockManagerId = loc
override def getSizeForBlock(reduceId: Int): Long = {
MapStatus.decompressSize(compressedSizes(reduceId))
}
override def writeExternal(out: ObjectOutput): Unit = Utils.tryOrIOException {
loc.writeExternal(out)
out.writeInt(compressedSizes.length)
out.write(compressedSizes)
}
override def readExternal(in: ObjectInput): Unit = Utils.tryOrIOException {
loc = BlockManagerId(in)
val len = in.readInt()
compressedSizes = new Array[Byte](len)
in.readFully(compressedSizes)
}
}
/**
* A [[MapStatus]] implementation that only stores the average size of non-empty blocks,
* plus a bitmap for tracking which blocks are empty. During serialization, this bitmap
* is compressed.
*
* @param loc location where the task is being executed
* @param numNonEmptyBlocks the number of non-empty blocks
* @param emptyBlocks a bitmap tracking which blocks are empty
* @param avgSize average size of the non-empty blocks
*/
private[spark] class HighlyCompressedMapStatus private (
private[this] var loc: BlockManagerId,
private[this] var numNonEmptyBlocks: Int,
private[this] var emptyBlocks: RoaringBitmap,
private[this] var avgSize: Long)
extends MapStatus with Externalizable {
// loc could be null when the default constructor is called during deserialization
require(loc == null || avgSize > 0 || numNonEmptyBlocks == 0,
"Average size can only be zero for map stages that produced no output")
protected def this() = this(null, -1, null, -1) // For deserialization only
override def location: BlockManagerId = loc
override def getSizeForBlock(reduceId: Int): Long = {
if (emptyBlocks.contains(reduceId)) {
0
} else {
avgSize
}
}
override def writeExternal(out: ObjectOutput): Unit = Utils.tryOrIOException {
loc.writeExternal(out)
emptyBlocks.writeExternal(out)
out.writeLong(avgSize)
}
override def readExternal(in: ObjectInput): Unit = Utils.tryOrIOException {
loc = BlockManagerId(in)
emptyBlocks = new RoaringBitmap()
emptyBlocks.readExternal(in)
avgSize = in.readLong()
}
}
private[spark] object HighlyCompressedMapStatus {
def apply(loc: BlockManagerId, uncompressedSizes: Array[Long]): HighlyCompressedMapStatus = {
// We must keep track of which blocks are empty so that we don't report a zero-sized
// block as being non-empty (or vice-versa) when using the average block size.
var i = 0
var numNonEmptyBlocks: Int = 0
var totalSize: Long = 0
// From a compression standpoint, it shouldn't matter whether we track empty or non-empty
// blocks. From a performance standpoint, we benefit from tracking empty blocks because
// we expect that there will be far fewer of them, so we will perform fewer bitmap insertions.
val emptyBlocks = new RoaringBitmap()
val totalNumBlocks = uncompressedSizes.length
while (i < totalNumBlocks) {
var size = uncompressedSizes(i)
if (size > 0) {
numNonEmptyBlocks += 1
totalSize += size
} else {
emptyBlocks.add(i)
}
i += 1
}
val avgSize = if (numNonEmptyBlocks > 0) {
totalSize / numNonEmptyBlocks
} else {
0
}
new HighlyCompressedMapStatus(loc, numNonEmptyBlocks, emptyBlocks, avgSize)
}
}
|
Dax1n/spark-core
|
core/src/main/scala/org/apache/spark/scheduler/MapStatus.scala
|
Scala
|
apache-2.0
| 6,736 |
/* RandomizedBackoffSpec.scala
*
* Copyright (c) 2013-2014 linkedin.com
* Copyright (c) 2013-2015 zman.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package atmos.backoff
import scala.concurrent.duration._
import scala.util.{ Failure, Success }
import org.scalatest._
/**
* Test suite for [[atmos.backoff.RandomizedBackoff]].
*/
class RandomizedBackoffSpec extends FlatSpec with Matchers {
val result = "result"
val thrown = new RuntimeException
"RandomizedBackoff" should "adjust the result of another backoff policy with a random value" in {
for {
backoff <- 1L to 100L map (100.millis * _)
(first, second) <- Seq(-10.millis -> 10.millis, 0.millis -> 0.millis, 10.millis -> 0.millis)
policy = RandomizedBackoff(ConstantBackoff(backoff), first -> second)
outcome <- Seq(Success(result), Failure(thrown))
attempt <- 1 to 10
} checkBackoff(backoff, first, second, policy.nextBackoff(attempt, outcome))
}
/** Checks that a randomized duration conforms to the expected range. */
def checkBackoff(base: FiniteDuration, first: FiniteDuration, second: FiniteDuration, result: FiniteDuration) =
if (first == second) result shouldEqual base
else {
val b = base.toNanos
val f = first.toNanos
val s = second.toNanos
val min = b + math.min(f, s)
val max = b + math.max(f, s)
val half = (max - min) / 2
result.toNanos shouldEqual (min + half) +- half
}
}
|
zmanio/atmos
|
src/test/scala/atmos/backoff/RandomizedBackoffSpec.scala
|
Scala
|
apache-2.0
| 1,977 |
package org.jetbrains.plugins.scala.lang.completion3
import org.jetbrains.plugins.scala.{LatestScalaVersions, ScalaVersion}
import org.jetbrains.plugins.scala.base.SharedTestProjectToken
class ScalaExtensionMethodCompletionTest extends ScalaCodeInsightTestBase {
override protected def supportedIn(version: ScalaVersion): Boolean =
version >= LatestScalaVersions.Scala_3_0
override def sharedProjectToken: SharedTestProjectToken = SharedTestProjectToken(this.getClass)
def testSimpleExtension(): Unit = doCompletionTest(
s"""object Test {
| extension (s: String)
| def digits: Seq[Char] = s.filter(_.isDigit)
|
| "foo123".di$CARET
|}""".stripMargin,
s"""object Test {
| extension (s: String)
| def digits: Seq[Char] = s.filter(_.isDigit)
|
| "foo123".digits
|}""".stripMargin,
item = "digits"
)
def testExtensionFromGiven(): Unit = doCompletionTest(
s"""object math3:
| trait Ord[T]
|
| trait Numeric[T] extends Ord[T]:
| extension (x: Int) def numeric: T = ???
|
|object Test3:
| import math3.Numeric
|
| def to[T: Numeric](x: Int): T =
| x.num$CARET""".stripMargin,
"""object math3:
| trait Ord[T]
|
| trait Numeric[T] extends Ord[T]:
| extension (x: Int) def numeric: T = ???
|
|object Test3:
| import math3.Numeric
|
| def to[T: Numeric](x: Int): T =
| x.numeric""".stripMargin,
item = "numeric"
)
def testFromImplicitScope(): Unit = doCompletionTest(
s"""class MyList[+T]
|
|object MyList:
| def apply[A](a: A*): MyList[A] = ???
|
| extension [T](xs: MyList[MyList[T]])
| def flatten: MyList[T] = ???
|
|object Test {
| MyList(MyList(1, 2), MyList(3, 4)).fl$CARET
|}""".stripMargin,
"""class MyList[+T]
|
|object MyList:
| def apply[A](a: A*): MyList[A] = ???
|
| extension [T](xs: MyList[MyList[T]])
| def flatten: MyList[T] = ???
|
|object Test {
| MyList(MyList(1, 2), MyList(3, 4)).flatten
|}""".stripMargin,
"flatten")
}
|
JetBrains/intellij-scala
|
scala/scala-impl/test/org/jetbrains/plugins/scala/lang/completion3/ScalaExtensionMethodCompletionTest.scala
|
Scala
|
apache-2.0
| 2,261 |
/**
* Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
*/
package akka.util
import java.util.concurrent.ConcurrentSkipListSet
import akka.actor.{ ActorInitializationException, ActorRef }
/**
* A manager for listener actors. Intended for mixin by observables.
*
* @author Martin Krasser
*/
trait ListenerManagement {
private val listeners = new ConcurrentSkipListSet[ActorRef]
/**
* Specifies whether listeners should be started when added and stopped when removed or not
*/
protected def manageLifeCycleOfListeners: Boolean = true
/**
* Adds the <code>listener</code> this registry's listener list.
* The <code>listener</code> is started by this method if manageLifeCycleOfListeners yields true.
*/
def addListener(listener: ActorRef) {
if (manageLifeCycleOfListeners) listener.start()
listeners add listener
}
/**
* Removes the <code>listener</code> this registry's listener list.
* The <code>listener</code> is stopped by this method if manageLifeCycleOfListeners yields true.
*/
def removeListener(listener: ActorRef) {
listeners remove listener
if (manageLifeCycleOfListeners) listener.stop()
}
/*
* Returns whether there are any listeners currently
*/
def hasListeners: Boolean = !listeners.isEmpty
/**
* Checks if a specific listener is registered. ActorInitializationException leads to removal of listener if that
* one isShutdown.
*/
def hasListener(listener: ActorRef): Boolean = listeners.contains(listener)
protected[akka] def notifyListeners(message: => Any) {
if (hasListeners) {
val msg = message
val iterator = listeners.iterator
while (iterator.hasNext) {
val listener = iterator.next
// Uncomment if those exceptions are so frequent as to bottleneck
// if (listener.isShutdown) iterator.remove() else
try {
listener ! msg
} catch {
case e: ActorInitializationException =>
if (listener.isShutdown) iterator.remove()
}
}
}
}
/**
* Execute <code>f</code> with each listener as argument. ActorInitializationException is not handled.
*/
protected[akka] def foreachListener(f: (ActorRef) => Unit) {
val iterator = listeners.iterator
while (iterator.hasNext) {
val listener = iterator.next
if (listener.isRunning) f(listener)
}
}
}
|
felixmulder/scala
|
test/disabled/presentation/akka/src/akka/util/ListenerManagement.scala
|
Scala
|
bsd-3-clause
| 2,419 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
import java.util.regex.Pattern
import bintray.BintrayPlugin.autoImport._
import com.jsuereth.sbtpgp.PgpKeys
import com.typesafe.tools.mima.core.ProblemFilters
import com.typesafe.tools.mima.core._
import com.typesafe.tools.mima.plugin.MimaKeys._
import com.typesafe.tools.mima.plugin.MimaPlugin._
import de.heikoseeberger.sbtheader.AutomateHeaderPlugin
import de.heikoseeberger.sbtheader.FileType
import de.heikoseeberger.sbtheader.CommentStyle
import de.heikoseeberger.sbtheader.HeaderPlugin.autoImport._
import interplay._
import interplay.Omnidoc.autoImport._
import interplay.PlayBuildBase.autoImport._
import interplay.ScalaVersions._
import sbt._
import sbt.Keys._
import sbt.ScriptedPlugin.autoImport._
import sbtwhitesource.WhiteSourcePlugin.autoImport._
import scala.sys.process.stringToProcess
import scala.util.control.NonFatal
object BuildSettings {
val snapshotBranch: String = {
try {
val branch = "git rev-parse --abbrev-ref HEAD".!!.trim
if (branch == "HEAD") {
// not on a branch, get the hash
"git rev-parse HEAD".!!.trim
} else branch
} catch {
case NonFatal(_) => "unknown"
}
}
/** File header settings. */
private def fileUriRegexFilter(pattern: String): FileFilter = new FileFilter {
val compiledPattern = Pattern.compile(pattern)
override def accept(pathname: File): Boolean = {
val uriString = pathname.toURI.toString
compiledPattern.matcher(uriString).matches()
}
}
val fileHeaderSettings = Seq(
excludeFilter in (Compile, headerSources) := HiddenFileFilter ||
fileUriRegexFilter(".*/cookie/encoding/.*") || fileUriRegexFilter(".*/inject/SourceProvider.java$") ||
fileUriRegexFilter(".*/libs/reflect/.*"),
headerLicense := Some(HeaderLicense.Custom("Copyright (C) Lightbend Inc. <https://www.lightbend.com>")),
headerMappings ++= Map(
FileType.xml -> CommentStyle.xmlStyleBlockComment,
FileType.conf -> CommentStyle.hashLineComment
)
)
private val VersionPattern = """^(\\d+).(\\d+).(\\d+)(-.*)?""".r
def evictionSettings: Seq[Setting[_]] = Seq(
// This avoids a lot of dependency resolution warnings to be showed.
evictionWarningOptions in update := EvictionWarningOptions.default
.withWarnTransitiveEvictions(false)
.withWarnDirectEvictions(false)
)
// We are not automatically promoting artifacts to Sonatype and
// Bintray so that we can have more control of the release process
// and do something if somethings fails (for example, if publishing
// a artifact times out).
def playPublishingPromotionSettings: Seq[Setting[_]] = Seq(
playBuildPromoteBintray := false,
playBuildPromoteSonatype := false
)
val DocsApplication = config("docs").hide
val SourcesApplication = config("sources").hide
/** These settings are used by all projects. */
def playCommonSettings: Seq[Setting[_]] = Def.settings(
fileHeaderSettings,
homepage := Some(url("https://playframework.com")),
ivyLoggingLevel := UpdateLogging.DownloadOnly,
resolvers ++= Seq(
// using this variant due to sbt#5405
"sonatype-service-local-releases"
.at("https://oss.sonatype.org/service/local/repositories/releases/content/"), // sync ScriptedTools.scala
Resolver.typesafeRepo("releases"),
Resolver.typesafeIvyRepo("releases"),
Resolver.sbtPluginRepo("releases"), // weird sbt-pgp/play docs/vegemite issue
),
evictionSettings,
ivyConfigurations ++= Seq(DocsApplication, SourcesApplication),
javacOptions ++= Seq("-encoding", "UTF-8", "-Xlint:unchecked", "-Xlint:deprecation"),
scalacOptions in (Compile, doc) := {
// disable the new scaladoc feature for scala 2.12.0, might be removed in 2.12.0-1 (https://github.com/scala/scala-dev/issues/249)
CrossVersion.partialVersion(scalaVersion.value) match {
case Some((2, v)) if v >= 12 => Seq("-no-java-comments")
case _ => Seq()
}
},
fork in Test := true,
parallelExecution in Test := false,
testListeners in (Test, test) := Nil,
javaOptions in Test ++= Seq("-XX:MaxMetaspaceSize=384m", "-Xmx512m", "-Xms128m"),
testOptions ++= Seq(
Tests.Argument(TestFrameworks.Specs2, "showtimes"),
Tests.Argument(TestFrameworks.JUnit, "-v")
),
bintrayPackage := "play-sbt-plugin",
playPublishingPromotionSettings,
version ~= { v =>
v +
sys.props.get("akka.version").map("-akka-" + _).getOrElse("") +
sys.props.get("akka.http.version").map("-akka-http-" + _).getOrElse("")
},
apiURL := {
val v = version.value
if (isSnapshot.value) {
v match {
case VersionPattern(epoch, major, _, _) =>
Some(url(raw"https://www.playframework.com/documentation/$epoch.$major.x/api/scala/index.html"))
case _ => Some(url("https://www.playframework.com/documentation/latest/api/scala/index.html"))
}
} else {
Some(url(raw"https://www.playframework.com/documentation/$v/api/scala/index.html"))
}
},
autoAPIMappings := true,
apiMappings ++= {
val scalaInstance = Keys.scalaInstance.value
scalaInstance.libraryJars.map { libraryJar =>
libraryJar -> url(
raw"""http://scala-lang.org/files/archive/api/${scalaInstance.actualVersion}/index.html"""
)
}.toMap
},
apiMappings ++= {
// Maps JDK 1.8 jar into apidoc.
val rtJar = sys.props
.get("sun.boot.class.path")
.flatMap(cp =>
cp.split(java.io.File.pathSeparator).collectFirst {
case str if str.endsWith(java.io.File.separator + "rt.jar") => str
}
)
rtJar match {
case None => Map.empty
case Some(rtJar) => Map(file(rtJar) -> url(Docs.javaApiUrl))
}
},
apiMappings ++= {
// Finds appropriate scala apidoc from dependencies when autoAPIMappings are insufficient.
// See the following:
//
// http://stackoverflow.com/questions/19786841/can-i-use-sbts-apimappings-setting-for-managed-dependencies/20919304#20919304
// http://www.scala-sbt.org/release/docs/Howto-Scaladoc.html#Enable+manual+linking+to+the+external+Scaladoc+of+managed+dependencies
// https://github.com/ThoughtWorksInc/sbt-api-mappings/blob/master/src/main/scala/com/thoughtworks/sbtApiMappings/ApiMappings.scala#L34
val ScalaLibraryRegex = """^.*[/\\\\]scala-library-([\\d\\.]+)\\.jar$""".r
val JavaxInjectRegex = """^.*[/\\\\]java.inject-([\\d\\.]+)\\.jar$""".r
val IvyRegex = """^.*[/\\\\]([\\.\\-_\\w]+)[/\\\\]([\\.\\-_\\w]+)[/\\\\](?:jars|bundles)[/\\\\]([\\.\\-_\\w]+)\\.jar$""".r
(for {
jar <- (dependencyClasspath in Compile in doc).value.toSet ++ (dependencyClasspath in Test in doc).value
fullyFile = jar.data
urlOption = fullyFile.getCanonicalPath match {
case ScalaLibraryRegex(v) =>
Some(url(raw"""http://scala-lang.org/files/archive/api/$v/index.html"""))
case JavaxInjectRegex(v) =>
// the jar file doesn't match up with $apiName-
Some(url(Docs.javaxInjectUrl))
case re @ IvyRegex(apiOrganization, apiName, jarBaseFile) if jarBaseFile.startsWith(s"$apiName-") =>
val apiVersion = jarBaseFile.substring(apiName.length + 1, jarBaseFile.length)
apiOrganization match {
case "com.typesafe.akka" =>
Some(url(raw"https://doc.akka.io/api/akka/$apiVersion/"))
case default =>
val link = Docs.artifactToJavadoc(apiOrganization, apiName, apiVersion, jarBaseFile)
Some(url(link))
}
case other =>
None
}
url <- urlOption
} yield (fullyFile -> url))(collection.breakOut(Map.canBuildFrom))
}
)
// Versions of previous minor releases being checked for binary compatibility
val mimaPreviousVersion: Option[String] = Some("2.8.0")
/**
* These settings are used by all projects that are part of the runtime, as opposed to the development mode of Play.
*/
def playRuntimeSettings: Seq[Setting[_]] = Def.settings(
playCommonSettings,
mimaDefaultSettings,
mimaPreviousArtifacts := mimaPreviousVersion.map { version =>
val cross = if (crossPaths.value) CrossVersion.binary else CrossVersion.disabled
(organization.value %% moduleName.value % version).cross(cross)
}.toSet,
mimaBinaryIssueFilters ++= Seq(
// Remove deprecated methods from HttpRequestHandler
ProblemFilters.exclude[DirectMissingMethodProblem]("play.api.http.DefaultHttpRequestHandler.filterHandler"),
ProblemFilters.exclude[DirectMissingMethodProblem]("play.api.http.DefaultHttpRequestHandler.this"),
ProblemFilters.exclude[DirectMissingMethodProblem]("play.api.http.JavaCompatibleHttpRequestHandler.this"),
// Refactor params of runEvolutions (ApplicationEvolutions however is private anyway)
ProblemFilters.exclude[IncompatibleMethTypeProblem]("play.api.db.evolutions.ApplicationEvolutions.runEvolutions"),
// Removed @varargs (which removed the array forwarder method)
ProblemFilters.exclude[DirectMissingMethodProblem]("play.api.libs.typedmap.DefaultTypedMap.-"),
),
unmanagedSourceDirectories in Compile += {
val suffix = CrossVersion.partialVersion(scalaVersion.value) match {
case Some((x, y)) => s"$x.$y"
case None => scalaBinaryVersion.value
}
(sourceDirectory in Compile).value / s"scala-$suffix"
},
// Argument for setting size of permgen space or meta space for all forked processes
Docs.apiDocsInclude := true
)
/** A project that is shared between the sbt runtime and the Play runtime. */
def PlayNonCrossBuiltProject(name: String, dir: String): Project = {
Project(name, file(dir))
.enablePlugins(PlaySbtLibrary, AutomateHeaderPlugin)
.settings(playRuntimeSettings: _*)
.settings(omnidocSettings: _*)
.settings(
autoScalaLibrary := false,
crossPaths := false,
crossScalaVersions := Seq(scala212)
)
}
/** A project that is only used when running in development. */
def PlayDevelopmentProject(name: String, dir: String): Project = {
Project(name, file(dir))
.enablePlugins(PlayLibrary, AutomateHeaderPlugin)
.settings(
playCommonSettings,
(javacOptions in compile) ~= (_.map {
case "1.8" => "1.6"
case other => other
}),
mimaPreviousArtifacts := Set.empty,
)
}
/** A project that is in the Play runtime. */
def PlayCrossBuiltProject(name: String, dir: String): Project = {
Project(name, file(dir))
.enablePlugins(PlayLibrary, AutomateHeaderPlugin, AkkaSnapshotRepositories)
.settings(playRuntimeSettings: _*)
.settings(omnidocSettings: _*)
.settings(
scalacOptions += "-target:jvm-1.8"
)
}
def omnidocSettings: Seq[Setting[_]] = Def.settings(
Omnidoc.projectSettings,
omnidocSnapshotBranch := snapshotBranch,
omnidocPathPrefix := ""
)
def playScriptedSettings: Seq[Setting[_]] = Seq(
// Don't automatically publish anything.
// The test-sbt-plugins-* scripts publish before running the scripted tests.
// When developing the sbt plugins:
// * run a publishLocal in the root project to get everything
// * run a publishLocal in the changes projects for fast feedback loops
scriptedDependencies := (()), // drop Test/compile & publishLocal
scriptedBufferLog := false,
scriptedLaunchOpts ++= Seq(
s"-Dsbt.boot.directory=${file(sys.props("user.home")) / ".sbt" / "boot"}",
"-Xmx512m",
"-XX:MaxMetaspaceSize=512m",
s"-Dscala.version=$scala212",
),
scripted := scripted.tag(Tags.Test).evaluated,
)
def disablePublishing = Def.settings(
disableNonLocalPublishing,
// This setting will work for sbt 1, but not 0.13. For 0.13 it only affects
// `compile` and `update` tasks.
skip in publish := true,
publishLocal := {},
)
def disableNonLocalPublishing = Def.settings(
// For sbt 0.13 this is what we need to avoid publishing. These settings can
// be removed when we move to sbt 1.
PgpKeys.publishSigned := {},
publish := {},
// We also don't need to track dependencies for unpublished projects
// so we need to disable WhiteSource plugin.
whitesourceIgnore := true
)
/** A project that runs in the sbt runtime. */
def PlaySbtProject(name: String, dir: String): Project = {
Project(name, file(dir))
.enablePlugins(PlaySbtLibrary, AutomateHeaderPlugin)
.settings(
playCommonSettings,
mimaPreviousArtifacts := Set.empty,
)
}
/** A project that *is* an sbt plugin. */
def PlaySbtPluginProject(name: String, dir: String): Project = {
Project(name, file(dir))
.enablePlugins(PlaySbtPlugin, AutomateHeaderPlugin)
.settings(
playCommonSettings,
playScriptedSettings,
fork in Test := false,
mimaPreviousArtifacts := Set.empty,
)
}
}
|
benmccann/playframework
|
project/BuildSettings.scala
|
Scala
|
apache-2.0
| 13,220 |
import org.specs2.mutable._
import CSVLang._
class Week10Tests extends Specification {
def getStream(x:CsvData):Stream[Stream[String]] = x match { case CsvData(x,_) => x }
def toList(x:Stream[Stream[String]]):List[List[String]] = x.map(x => x.toList).toList
"Week10Tests" should {
    //////////////////////////////////////// ALL TESTS USE THIS STRING !
val string =
"""a,1,2
b,2,3
"""
////////////////////////////////////////
val c: CsvData = CSVLang.toCsvData(string)
"parseToCsvData" in {
toList(getStream(c)) mustEqual List(List("a", "1", "2"), List("b", "2", "3"))
}
"merge CsvData" in {
toList(getStream(c >>> DataMerge(c))) mustEqual List(List("a", "1", "2"), List("b", "2", "3"),List("a", "1", "2"), List("b", "2", "3"))
}
"dropCol" in {
toList(getStream(c >>> DropCol(0))) mustEqual List(List("1", "2"), List("2", "3"))
toList(getStream(c >>> DropCol(1))) mustEqual List(List("a","2"), List("b", "3"))
}
"swapCol" in {
toList(getStream(c >>> SwapCol(0,1))) mustEqual List(List("1", "a", "2"), List("2", "b", "3"))
toList(getStream(c >>> SwapCol(0,0))) mustEqual List(List("a", "1", "2"), List("b", "2", "3"))
}
"reverseRows" in {
toList(getStream(c >>> RevAction(true))) mustEqual List(List("2", "1", "a"), List("3", "2", "b"))
}
"sequence" in {
toList(getStream(c >>> DropCol(0) --> SwapCol(0,1))) mustEqual List(List("2", "1"), List("3", "2"))
toList(getStream(c >>> (drop, 0) --> (swap, 0, 1) --> (out, "test.csv"))) mustEqual List(List("2", "1"), List("3", "2"))
toList(getStream(c >>> DataMerge(c) --> DropCol(0) --> SwapCol(0,1) --> RevAction(true))) mustEqual List(List("1", "2"), List("2", "3"),List("1", "2"), List("2", "3"))
toList(getStream(c >>> DataMerge(c) --> (drop,2) --> (drop,1))) mustEqual List(List("a"), List("b"), List("a"), List("b"))
toList(getStream(("test.csv") >>> revrow --> (drop,0) --> screen)) mustEqual List(List("2"), List("3"))
toList(getStream(("test.csv") >>> screen)) mustEqual List(List("2", "1"), List("3", "2"))
}
}
}
|
Meemaw/scalaProgramming
|
week10/src/test/scala/MainTests.scala
|
Scala
|
mit
| 2,063 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.netty
import java.io.File
import java.nio.ByteBuffer
import scala.collection.JavaConverters._
import scala.concurrent.{Future, Promise}
import scala.reflect.ClassTag
import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.network._
import org.apache.spark.network.buffer.ManagedBuffer
import org.apache.spark.network.client.{RpcResponseCallback, TransportClientBootstrap, TransportClientFactory}
import org.apache.spark.network.crypto.{AuthClientBootstrap, AuthServerBootstrap}
import org.apache.spark.network.server._
import org.apache.spark.network.shuffle.{BlockFetchingListener, OneForOneBlockFetcher, RetryingBlockFetcher}
import org.apache.spark.network.shuffle.protocol.UploadBlock
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.serializer.JavaSerializer
import org.apache.spark.storage.{BlockId, StorageLevel}
import org.apache.spark.util.Utils
/**
 * A BlockTransferService that uses Netty to fetch a set of blocks at a time.
 * It is used for network data transfer, i.e. whenever block data needs to be read from
 * remote nodes.
 */
private[spark] class NettyBlockTransferService(
conf: SparkConf,
securityManager: SecurityManager,
bindAddress: String,
override val hostName: String,
_port: Int,
numCores: Int)
extends BlockTransferService {
// TODO: Don't use Java serialization, use a more cross-version compatible serialization format.
private val serializer = new JavaSerializer(conf)
private val authEnabled = securityManager.isAuthenticationEnabled()
private val transportConf = SparkTransportConf.fromSparkConf(conf, "shuffle", numCores)
private[this] var transportContext: TransportContext = _
private[this] var server: TransportServer = _
private[this] var clientFactory: TransportClientFactory = _
private[this] var appId: String = _
  // Start the block transfer server
override def init(blockDataManager: BlockDataManager): Unit = {
val rpcHandler = new NettyBlockRpcServer(conf.getAppId, serializer, blockDataManager)
var serverBootstrap: Option[TransportServerBootstrap] = None
var clientBootstrap: Option[TransportClientBootstrap] = None
if (authEnabled) {
serverBootstrap = Some(new AuthServerBootstrap(transportConf, securityManager))
clientBootstrap = Some(new AuthClientBootstrap(transportConf, conf.getAppId, securityManager))
}
transportContext = new TransportContext(transportConf, rpcHandler)
clientFactory = transportContext.createClientFactory(clientBootstrap.toSeq.asJava)
server = createServer(serverBootstrap.toList)
appId = conf.getAppId
logInfo(s"Server created on ${hostName}:${server.getPort}")
}
/** Creates and binds the TransportServer, possibly trying multiple ports. */
private def createServer(bootstraps: List[TransportServerBootstrap]): TransportServer = {
def startService(port: Int): (TransportServer, Int) = {
val server = transportContext.createServer(bindAddress, port, bootstraps.asJava)
(server, server.getPort)
}
Utils.startServiceOnPort(_port, startService, conf, getClass.getName)._1
}
  /**
   * Fetches blocks from a remote node.
   * @param host remote host
   * @param port remote port
   * @param execId remote executor id
   * @param blockIds ids of the blocks to fetch
   * @param listener listener notified as each block fetch succeeds or fails
   * @param shuffleFiles target files when fetched shuffle blocks are written to disk
   */
override def fetchBlocks(
host: String,
port: Int,
execId: String,
blockIds: Array[String],
listener: BlockFetchingListener,
shuffleFiles: Array[File]): Unit = {
logTrace(s"Fetch blocks from $host:$port (executor id $execId)")
try {
val blockFetchStarter = new RetryingBlockFetcher.BlockFetchStarter {
override def createAndStart(blockIds: Array[String], listener: BlockFetchingListener) {
          // Create a TransportClient for the remote node using its host and port
val client = clientFactory.createClient(host, port)
          // Use the client to send the fetch-blocks request to that node
new OneForOneBlockFetcher(client, appId, execId, blockIds.toArray, listener,
transportConf, shuffleFiles).start()
}
}
val maxRetries = transportConf.maxIORetries()
if (maxRetries > 0) {
// Note this Fetcher will correctly handle maxRetries == 0; we avoid it just in case there's
// a bug in this code. We should remove the if statement once we're sure of the stability.
new RetryingBlockFetcher(transportConf, blockFetchStarter, blockIds, listener).start()
} else {
blockFetchStarter.createAndStart(blockIds, listener)
}
} catch {
case e: Exception =>
logError("Exception while beginning fetchBlocks", e)
blockIds.foreach(listener.onBlockFetchFailure(_, e))
}
}
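  // Hedged usage sketch (all values below are hypothetical and not taken from this file):
  // a block manager needing remote blocks would call fetchBlocks roughly as
  //
  //   transferService.fetchBlocks("worker-1.example.com", 7337, "exec-3",
  //     Array("shuffle_0_1_2"), myListener, null)
  //
  // and be notified per block through the listener's onBlockFetchSuccess /
  // onBlockFetchFailure callbacks.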
override def port: Int = server.getPort
override def uploadBlock(
hostname: String,
port: Int,
execId: String,
blockId: BlockId,
blockData: ManagedBuffer,
level: StorageLevel,
classTag: ClassTag[_]): Future[Unit] = {
val result = Promise[Unit]()
val client = clientFactory.createClient(hostname, port)
// StorageLevel and ClassTag are serialized as bytes using our JavaSerializer.
// Everything else is encoded using our binary protocol.
val metadata = JavaUtils.bufferToArray(serializer.newInstance().serialize((level, classTag)))
// Convert or copy nio buffer into array in order to serialize it.
val array = JavaUtils.bufferToArray(blockData.nioByteBuffer())
client.sendRpc(new UploadBlock(appId, execId, blockId.toString, metadata, array).toByteBuffer,
new RpcResponseCallback {
override def onSuccess(response: ByteBuffer): Unit = {
logTrace(s"Successfully uploaded block $blockId")
result.success((): Unit)
}
override def onFailure(e: Throwable): Unit = {
logError(s"Error while uploading block $blockId", e)
result.failure(e)
}
})
result.future
}
override def close(): Unit = {
if (server != null) {
server.close()
}
if (clientFactory != null) {
clientFactory.close()
}
}
}
|
wangyixiaohuihui/spark2-annotation
|
core/src/main/scala/org/apache/spark/network/netty/NettyBlockTransferService.scala
|
Scala
|
apache-2.0
| 7,010 |
/*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.directive.std
import cats.syntax.all._
import laika.ast._
import laika.directive.{Blocks, Templates}
import laika.parse.SourceFragment
import scala.annotation.tailrec
/** Provides the implementation for the standard breadcrumb directives.
*
* This includes the template and markup-block variants of this directive,
* which builds a navigation list from the root node of the input tree to the current document.
*
* For full documentation see the section about the
* [[https://planet42.github.io/Laika/07-reference/01-standard-directives.html#breadcrumb Breadcrumb Directive]]
* in the manual.
*
* @author Jens Halm
*/
object BreadcrumbDirectives {
/** A block resolver that replaces itself with a navigation list from the root node of the input tree to the current document
* during AST transformations.
*
* Serves as the implementation for the breadcrumb directive, but can also be inserted into the AST manually.
*/
case class BreadcrumbBuilder (source: SourceFragment, options: Options = NoOpt) extends BlockResolver {
type Self = BreadcrumbBuilder
def resolve (cursor: DocumentCursor): Block = {
val context = NavigationBuilderContext(
refPath = cursor.path,
itemStyles = Set(Style.breadcrumb.styles.head)
)
@tailrec
      def entriesFor(tree: TreeCursor, items: List[NavigationItem] = Nil): List[NavigationItem] = {
val title = tree.target.title.getOrElse(SpanSequence(tree.path.name))
val item = context.newNavigationItem(title, tree.target.titleDocument, Nil, tree.target.targetFormats)
tree.parent match {
case None => item :: items
          case Some(parent) => entriesFor(parent, item :: items)
}
}
val docEntry = {
val title = cursor.target.title.getOrElse(SpanSequence(cursor.path.name))
context.newNavigationItem(title, Some(cursor.target), Nil, cursor.target.targetFormats)
}
NavigationList(entriesFor(cursor.parent) :+ docEntry, Style.breadcrumb)
}
def withOptions (options: Options): BreadcrumbBuilder = copy(options = options)
lazy val unresolvedMessage: String = "Unresolved breadcrumb builder"
}
/** Implementation of the `breadcrumb` directive for templates.
*/
lazy val forTemplates: Templates.Directive = Templates.create("breadcrumb") {
import Templates.dsl._
(cursor, Templates.dsl.source).mapN { case (cursor, src) =>
TemplateElement(BreadcrumbBuilder(src).resolve(cursor))
}
}
/** Implementation of the `breadcrumb` directive for block elements in markup documents.
*/
lazy val forBlocks: Blocks.Directive = Blocks.create("breadcrumb") {
import Blocks.dsl._
(cursor, Blocks.dsl.source).mapN { case (cursor, src) =>
BreadcrumbBuilder(src).resolve(cursor)
}
}
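  // Hedged usage note: both variants are registered under the name "breadcrumb", so in
  // template or markup sources the directive is typically invoked as `@:breadcrumb`
  // (exact syntax per the Laika documentation linked above). Programmatically, the same
  // navigation list can be produced by inserting a BreadcrumbBuilder(source) block into
  // the AST and letting it resolve during rewriting.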
}
|
planet42/Laika
|
core/shared/src/main/scala/laika/directive/std/BreadcrumbDirectives.scala
|
Scala
|
apache-2.0
| 3,485 |
package scalarules.test.junit
import org.junit.Test
//Used to verify a folder doesn't match in test discovery.
//See JunitMatchesOnlyFilesEvenIfFolderMatchesPattern target
class scala {
@Test
def atLeastOneTestIsNeeded: Unit = {
}
}
|
ianoc/rules_scala
|
test/src/main/scala/scalarules/test/junit/scala.scala
|
Scala
|
apache-2.0
| 242 |
package sand.gcs.system.distributed
import akka.actor.{ Actor, ActorRef, ActorSystem, Address, AddressFromURIString, Deploy, Props }
import akka.remote.{ RemoteClientLifeCycleEvent, RemoteScope }
import com.typesafe.config.ConfigFactory
import java.net.InetAddress
import sand.gcs.system.GraphCoordinateSystem
import sand.gcs.system.distributed.Master._
import sand.gcs.system.distributed.Reaper._
import sand.gcs.util.Config.config
import sand.gcs.util.DistanceStore
import scala.Console.err
import scala.collection.JavaConverters._
/** Helper object for distributed implementations of graph coordinate systems. */
object DistributedGCS {
private val akkaConfig = ConfigFactory.parseString("""
akka {
actor {
provider = "akka.remote.RemoteActorRefProvider"
}
remote {
netty {
hostname = """ + "\"" + InetAddress.getLocalHost().getHostName() + "\"" + """
message-frame-size = 500 MiB
port = 2554
}
}
}
""")
/** Begins executing the graph coordinate system embedding in a distributed environment.
*
* Making this call will spawn a "Master" on the same JVM. It is expected that before
* this call is made, all machines that have been specified in the configuration file
* list gcs.deploy.active have [[sand.gcs.system.distributed.Worker]] running.
*
* Once the Master is spawned, it will begin embedding the landmarks in a single threaded
* fashion. Once the landmarks are embedded, it will send a message to all Workers which
* triggers them to spawn Worker actors and load the partially completed GCS in memory.
* The number of Worker actors spawned is specified in configuration under
* gcs.deploy.[worker address].nr-of-workers. This means for each worker in the
* gcs.deploy.active list there must be a corresponding entry in
* gcs.deploy.[worker address].nr-of-workers.
*
* Each Worker will then send a message to the Master notifying the Master of the Worker's
* existence, and the Master will begin sending the Workers one non-landmark ID at a time.
*
* Once all non-landmark ID's have been distributed and the results received, this will
* trigger the Master to write the result to disk, and gracefully shutdown the entire
* system.
*
* @param gcs Graph Coordinate System instance to embed with
* @param distanceStore Container that holds the requisite distances to embed
* @param nodeIds A sequence of non-landmark IDs to embed (the "work")
* @param outputFilename Name of the file to write the results of the embedding to
*/
def execute[CoordType](
gcs: GraphCoordinateSystem[CoordType],
distanceStore: DistanceStore,
nodeIds: Seq[Int],
outputFilename: String) =
new DistributedGCS(gcs, distanceStore, nodeIds, outputFilename)
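  // Minimal usage sketch (names and values are illustrative assumptions, not part of
  // this repository):
  //
  //   val nonLandmarkIds: Seq[Int] = 100 until 10000
  //   DistributedGCS.execute(myGcs, myDistanceStore, nonLandmarkIds, "embedding.out")
  //
  // Workers listed under gcs.deploy.active must already be running before this call,
  // as described in the scaladoc above.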
/** Creates a Props object for Akka Actors.
*
* Putting this inside the DistributedGCS class may cause it to create a closure
* which captures the outer scope, including the ActorSystemImpl in Akka, which
* is not intended to be Serialized. This helper function prevents the creation
* of a closure.
*/
private def workerProps[CoordType](
workerAddress: Address,
master: ActorRef,
workerReaper: ActorRef,
gcs: GraphCoordinateSystem[CoordType],
distanceStore: DistanceStore) =
Props(new Worker(master, workerReaper, gcs, distanceStore)).withDeploy(Deploy(scope = RemoteScope(workerAddress)))
}
/** Class that provides an ActorSystem environment and sets the foundation for the system.
*
* Upon instantiation, this class will create an ActorSystem, spawn the Master,
* the Reaper ("kills" Actors after computation is completed) and spawns the Workers
* on remote machines.
*/
class DistributedGCS[CoordType] private (gcs: GraphCoordinateSystem[CoordType], distanceStore: DistanceStore, nodes: Seq[Int], outputFilename: String) extends Serializable {
import DistributedGCS._
val system = ActorSystem("GCS", ConfigFactory.load(akkaConfig))
val master = system.actorOf(Props(new Master(gcs, outputFilename)), "master")
val reaper = system.actorOf(Props[Reaper], "reaper")
reaper ! WatchMe(master)
system.eventStream.subscribe(master, classOf[RemoteClientLifeCycleEvent])
err.println("Master deployed on " + InetAddress.getLocalHost().getHostName + " as " + master)
nodes.map(DistributeWork(_)).foreach(master ! _)
master ! AllWorkSent
val workerNodes = config.getStringList("gcs.deploy.active").asScala
workerNodes.foreach { node =>
val workerReaper = system.actorFor(s"akka://Worker@$node" + ":2552/user/reaper")
val workerAddress = AddressFromURIString(s"akka://Worker@$node" + ":2552")
val numberOfWorkers = config.getInt("gcs.deploy." + node + ".nr-of-workers")
for (i <- 0 until numberOfWorkers) {
system.actorOf(workerProps(workerAddress, master, workerReaper, gcs, distanceStore))
}
}
}
|
snowland/scala-gcs
|
src/main/scala/sand/gcs/system/distributed/DistributedGCS.scala
|
Scala
|
bsd-3-clause
| 4,966 |
package wow.auth.handlers
import akka.pattern.ask
import akka.util.Timeout
import wow.auth.AccountsState
import wow.auth.AccountsState.IsOnline
import wow.auth.data.Account
import wow.auth.protocol.AuthResults
import wow.auth.protocol.packets.{ClientLogonProof, ServerLogonProof, ServerLogonProofFailure, ServerLogonProofSuccess}
import wow.auth.session.AuthSession.EventIncoming
import wow.auth.session._
import wow.auth.utils.PacketSerializer
import scala.concurrent.Await
import scala.concurrent.duration._
/**
* Handles logon proofs
*/
trait LogonProofHandler {
this: AuthSession =>
def handleProof: StateFunction = {
case Event(EventIncoming(bits), ChallengeData(login, srp6Identity, srp6Challenge)) =>
def fail(reason: AuthResults.AuthResult) = {
sendPacket(ServerLogonProof(reason, None, Some(ServerLogonProofFailure())))
goto(StateFailed) using NoData
}
log.debug("Received proof")
val packet = PacketSerializer.deserialize[ClientLogonProof](bits)
log.debug(packet.toString)
srp6.verify(login, packet.clientKey, packet.clientProof, srp6Identity, srp6Challenge) match {
case Some(srp6Validation) =>
val accountState = context.actorSelection(AccountsState.ActorPath)
implicit val timeout = Timeout(5 seconds)
val askIsOnline = (accountState ? IsOnline(login)).mapTo[Boolean]
val isOnline = Await.result(askIsOnline, timeout.duration)
if (isOnline) {
fail(AuthResults.FailAlreadyOnline)
} else {
Account.findByLogin(login) match {
case Some(account) =>
Account.save(account.copy(sessionKey = Some(srp6Validation.sharedKey)))
sendPacket(ServerLogonProof(AuthResults.Success,
Some(ServerLogonProofSuccess(srp6Validation.serverProof)),
None))
goto(StateRealmlist) using RealmsListData(account)
case None =>
fail(AuthResults.FailUnknownAccount)
}
}
case None =>
fail(AuthResults.FailUnknownAccount)
}
}
}
|
SKNZ/SpinaciCore
|
wow/core/src/main/scala/wow/auth/handlers/LogonProofHandler.scala
|
Scala
|
mit
| 2,141 |
package dundertext.editor
import dundertext.data.Time
trait Player {
def cue(time: Time): Unit
def currentTime: Time
def isPaused: Boolean
def play(): Unit
def pause(): Unit
def cueStart(): Unit
def cueEnd(): Unit
def seek(offsetMillis: Int): Unit
def playUntil(time: Time): Unit
}
|
dundertext/dundertext
|
editor/src/main/scala/dundertext/editor/Player.scala
|
Scala
|
gpl-3.0
| 302 |
package leon.gametictactoe
import scala.scalajs.js.JSApp
import scala.scalajs.js.annotation.JSExport
import org.scalajs.dom
import dom.document
import dom.html
import leon.lang._
import leon.util.Random
import leon.lang.StaticChecks._
@JSExport
object Main {
import GameTicTacToe._
type Ctx2D = dom.CanvasRenderingContext2D
val CellWidth = 300
val CellHeight = 300
@JSExport
def main(c: html.Canvas): Unit = {
implicit val randomState = Random.newState
println("Welcome to Tic Tac Toe!")
val game = Game(
LevelMap(Cell(None()), Cell(None()), Cell(None()),
Cell(None()), Cell(None()), Cell(None()),
Cell(None()), Cell(None()), Cell(None())),
PlayerCross)
renderGame(game.map, "Start game with X")(c)
// Mouse click for tictactoe
c.onmousedown = {
(e: dom.MouseEvent) =>
(1 to 3).foreach { i =>
(1 to 3).foreach { j =>
if((e.clientX <= i * CellWidth) && (e.clientX > (i - 1) * CellWidth) && (e.clientY <= j * CellHeight) && (e.clientY > (j - 1) * CellHeight)) {
println(s"at $i, $j")
if(game.map.isFree(j, i)) {
val player = game.currentPlayer
game.doPlay(j, i)
if(player.isCross) {
println("placing cross")
if(checkGameEnded(game.map)) {
renderGameOver("X")(c)
} else {
renderGame(game.map, "O's turn")(c)
}
} else {
println("placing circle")
if(checkGameEnded(game.map)) {
renderGameOver("O")(c)
} else {
renderGame(game.map, "X's turn")(c)
}
}
}
}
}
}
}
}
def renderGame(map: LevelMap, msg: String)(c: html.Canvas): Unit = {
val ctx = c.getContext("2d").asInstanceOf[Ctx2D]
ctx.clearRect(0, 0, 900, 900)
var x = 0
var y = 0
renderCell(map.c11, x, y)(ctx)
x += CellWidth
renderCell(map.c12, x, y)(ctx)
x += CellWidth
renderCell(map.c13, x, y)(ctx)
x = 0
y += CellHeight
renderCell(map.c21, x, y)(ctx)
x += CellWidth
renderCell(map.c22, x, y)(ctx)
x += CellWidth
renderCell(map.c23, x, y)(ctx)
x = 0
y += CellHeight
renderCell(map.c31, x, y)(ctx)
x += CellWidth
renderCell(map.c32, x, y)(ctx)
x += CellWidth
renderCell(map.c33, x, y)(ctx)
x = 0
y += CellHeight
ctx.stroke()
ctx.font = "20px Georgia"
y -= 7
x += 3
ctx.fillText(msg, x, y)
ctx.stroke()
}
def renderGameOver(player: String)(c: html.Canvas): Unit = {
val ctx = c.getContext("2d").asInstanceOf[Ctx2D]
ctx.clearRect(0, 0, 900, 900)
var x = 0
var y = CellHeight
ctx.strokeStyle = "black"
ctx.font = "40px Georgia"
ctx.fillText(s"GAME OVER, $player wins!\\nRefresh to restart!", x, y)
ctx.stroke()
}
def renderCell(c: GameTicTacToe.Cell, x: Int, y: Int)(ctx: Ctx2D): Unit = {
ctx.strokeStyle = "black"
ctx.lineWidth = 6
ctx.rect(x, y, CellWidth, CellHeight)
ctx.font = "120px Georgia"
val cx = (2*x + CellWidth)/2 - 30
val cy = (2*y + CellHeight)/2 + 40
val elem = c.n match {
case Some(PlayerCross) => "X"
case Some(PlayerCircle) => "O"
case None() => ""
}
ctx.fillText(elem, cx, cy)
}
}
|
epfl-lara/leon
|
examples/gametictactoe/src/main/scala/leon/gametictactoe/Main.scala
|
Scala
|
gpl-3.0
| 3,448 |
package com.sksamuel.scapegoat.inspections.collections
import com.sksamuel.scapegoat._
/**
* @author Stephen Samuel
*
* Inspired by IntelliJ
*
* Checks for filter.size > 0, filter.size == 0, etc
*/
class FilterDotSizeComparison extends Inspection {
def inspector(context: InspectionContext): Inspector = new Inspector(context) {
override def postTyperTraverser = Some apply new context.Traverser {
import context.global._
override def inspect(tree: Tree): Unit = {
tree match {
// todo
case Select(Apply(Select(_, TermName("filter")), _), TermName("isEmpty")) =>
context.warn("filter().isEmpty instead of !exists()",
tree.pos,
Levels.Info,
".filter(x => Bool).isEmpty can be replaced with !exists(x => Bool): " + tree.toString().take(500),
FilterDotSizeComparison.this)
case _ => continue(tree)
}
}
}
}
}
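// Illustrative snippet of the pattern this inspection reports (hypothetical code):
//
//   if (xs.filter(_ > 0).isEmpty) ...   // flagged: prefer !xs.exists(_ > 0)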
|
pwwpche/scalac-scapegoat-plugin
|
src/main/scala/com/sksamuel/scapegoat/inspections/collections/FilterDotSizeComparison.scala
|
Scala
|
apache-2.0
| 976 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.api.java.function.FilterFunction
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.objects._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.types.{ArrayType, DataType, MapType, StructType, UserDefinedType}
/*
* This file defines optimization rules related to object manipulation (for the Dataset API).
*/
/**
* Removes cases where we are unnecessarily going between the object and serialized (InternalRow)
 * representation of a data item, for example back-to-back map operations.
*/
object EliminateSerialization extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case d @ DeserializeToObject(_, _, s: SerializeFromObject)
if d.outputObjAttr.dataType == s.inputObjAttr.dataType =>
// Adds an extra Project here, to preserve the output expr id of `DeserializeToObject`.
// We will remove it later in RemoveAliasOnlyProject rule.
val objAttr = Alias(s.inputObjAttr, s.inputObjAttr.name)(exprId = d.outputObjAttr.exprId)
Project(objAttr :: Nil, s.child)
case a @ AppendColumns(_, _, _, _, _, s: SerializeFromObject)
if a.deserializer.dataType == s.inputObjAttr.dataType =>
AppendColumnsWithObject(a.func, s.serializer, a.serializer, s.child)
// If there is a `SerializeFromObject` under typed filter and its input object type is same with
// the typed filter's deserializer, we can convert typed filter to normal filter without
// deserialization in condition, and push it down through `SerializeFromObject`.
// e.g. `ds.map(...).filter(...)` can be optimized by this rule to save extra deserialization,
// but `ds.map(...).as[AnotherType].filter(...)` can not be optimized.
case f @ TypedFilter(_, _, _, _, s: SerializeFromObject)
if f.deserializer.dataType == s.inputObjAttr.dataType =>
s.copy(child = f.withObjectProducerChild(s.child))
// If there is a `DeserializeToObject` upon typed filter and its output object type is same with
// the typed filter's deserializer, we can convert typed filter to normal filter without
// deserialization in condition, and pull it up through `DeserializeToObject`.
// e.g. `ds.filter(...).map(...)` can be optimized by this rule to save extra deserialization,
// but `ds.filter(...).as[AnotherType].map(...)` can not be optimized.
case d @ DeserializeToObject(_, _, f: TypedFilter)
if d.outputObjAttr.dataType == f.deserializer.dataType =>
f.withObjectProducerChild(d.copy(child = f.child))
}
}
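// Illustrative Dataset pipeline this rule targets (hypothetical, not from this file):
//
//   ds.map(f).map(g)
//
// Without the rule, the result of `f` is serialized to InternalRow and immediately
// deserialized again before `g`; the rule removes that round trip so the object
// representation flows directly between the two typed operations.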
/**
* Combines two adjacent [[TypedFilter]]s, which operate on same type object in condition, into one,
* merging the filter functions into one conjunctive function.
*/
object CombineTypedFilters extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case t1 @ TypedFilter(_, _, _, _, t2 @ TypedFilter(_, _, _, _, child))
if t1.deserializer.dataType == t2.deserializer.dataType =>
TypedFilter(
combineFilterFunction(t2.func, t1.func),
t1.argumentClass,
t1.argumentSchema,
t1.deserializer,
child)
}
private def combineFilterFunction(func1: AnyRef, func2: AnyRef): Any => Boolean = {
(func1, func2) match {
case (f1: FilterFunction[_], f2: FilterFunction[_]) =>
input => f1.asInstanceOf[FilterFunction[Any]].call(input) &&
f2.asInstanceOf[FilterFunction[Any]].call(input)
case (f1: FilterFunction[_], f2) =>
input => f1.asInstanceOf[FilterFunction[Any]].call(input) &&
f2.asInstanceOf[Any => Boolean](input)
case (f1, f2: FilterFunction[_]) =>
input => f1.asInstanceOf[Any => Boolean].apply(input) &&
f2.asInstanceOf[FilterFunction[Any]].call(input)
case (f1, f2) =>
input => f1.asInstanceOf[Any => Boolean].apply(input) &&
f2.asInstanceOf[Any => Boolean].apply(input)
}
}
}
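// Illustrative case for the rule above (hypothetical): `ds.filter(p1).filter(p2)` on the
// same element type becomes a single TypedFilter whose condition is `p1 && p2`, saving
// one object deserialization per input row.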
/**
 * Removes MapObjects when the following conditions are satisfied:
 *   1. MapObjects(... LambdaVariable(..., false) ...), which means the input and output types
 *      are non-nullable primitive types
 *   2. no custom collection class is specified as the representation of the data items.
*/
object EliminateMapObjects extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
case MapObjects(_, LambdaVariable(_, _, false, _), inputData, None) => inputData
}
}
/**
* Prunes unnecessary object serializers from query plan. This rule prunes both individual
* serializer and nested fields in serializers.
*/
object ObjectSerializerPruning extends Rule[LogicalPlan] {
/**
* Visible for testing.
* Collects all struct types from given data type object, recursively.
*/
def collectStructType(dt: DataType, structs: ArrayBuffer[StructType]): ArrayBuffer[StructType] = {
dt match {
case s @ StructType(fields) =>
structs += s
fields.map(f => collectStructType(f.dataType, structs))
case ArrayType(elementType, _) =>
collectStructType(elementType, structs)
case MapType(keyType, valueType, _) =>
collectStructType(keyType, structs)
collectStructType(valueType, structs)
// We don't use UserDefinedType in those serializers.
case _: UserDefinedType[_] =>
case _ =>
}
structs
}
/**
* This method returns pruned `CreateNamedStruct` expression given an original `CreateNamedStruct`
* and a pruned `StructType`.
*/
private def pruneNamedStruct(struct: CreateNamedStruct, prunedType: StructType) = {
// Filters out the pruned fields.
val resolver = conf.resolver
val prunedFields = struct.nameExprs.zip(struct.valExprs).filter { case (nameExpr, _) =>
val name = nameExpr.eval(EmptyRow).toString
prunedType.fieldNames.exists(resolver(_, name))
}.flatMap(pair => Seq(pair._1, pair._2))
CreateNamedStruct(prunedFields)
}
/**
* When we change nested serializer data type, `If` expression will be unresolved because
* literal null's data type doesn't match now. We need to align it with new data type.
* Note: we should do `transformUp` explicitly to change data types.
*/
private def alignNullTypeInIf(expr: Expression) = expr.transformUp {
case i @ If(IsNullCondition(), Literal(null, dt), ser) if !dt.sameType(ser.dataType) =>
i.copy(trueValue = Literal(null, ser.dataType))
}
object IsNullCondition {
def unapply(expr: Expression): Boolean = expr match {
case _: IsNull => true
case i: Invoke if i.functionName == "isNullAt" => true
case _ => false
}
}
/**
* This method prunes given serializer expression by given pruned data type. For example,
* given a serializer creating struct(a int, b int) and pruned data type struct(a int),
* this method returns pruned serializer creating struct(a int).
*/
def pruneSerializer(
serializer: NamedExpression,
prunedDataType: DataType): NamedExpression = {
val prunedStructTypes = collectStructType(prunedDataType, ArrayBuffer.empty[StructType])
.toIterator
def transformer: PartialFunction[Expression, Expression] = {
case m: ExternalMapToCatalyst =>
val prunedKeyConverter = m.keyConverter.transformDown(transformer)
val prunedValueConverter = m.valueConverter.transformDown(transformer)
m.copy(keyConverter = alignNullTypeInIf(prunedKeyConverter),
valueConverter = alignNullTypeInIf(prunedValueConverter))
case s: CreateNamedStruct if prunedStructTypes.hasNext =>
val prunedType = prunedStructTypes.next()
pruneNamedStruct(s, prunedType)
}
val transformedSerializer = serializer.transformDown(transformer)
val prunedSerializer = alignNullTypeInIf(transformedSerializer).asInstanceOf[NamedExpression]
if (prunedSerializer.dataType.sameType(prunedDataType)) {
prunedSerializer
} else {
serializer
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case p @ Project(_, s: SerializeFromObject) =>
// Prunes individual serializer if it is not used at all by above projection.
val usedRefs = p.references
val prunedSerializer = s.serializer.filter(usedRefs.contains)
val rootFields = SchemaPruning.identifyRootFields(p.projectList, Seq.empty)
if (conf.serializerNestedSchemaPruningEnabled && rootFields.nonEmpty) {
// Prunes nested fields in serializers.
val prunedSchema = SchemaPruning.pruneDataSchema(
StructType.fromAttributes(prunedSerializer.map(_.toAttribute)), rootFields)
val nestedPrunedSerializer = prunedSerializer.zipWithIndex.map { case (serializer, idx) =>
pruneSerializer(serializer, prunedSchema(idx).dataType)
}
// Builds new projection.
val projectionOverSchema = ProjectionOverSchema(prunedSchema)
val newProjects = p.projectList.map(_.transformDown {
case projectionOverSchema(expr) => expr
}).map { case expr: NamedExpression => expr }
p.copy(projectList = newProjects,
child = SerializeFromObject(nestedPrunedSerializer, s.child))
} else {
p.copy(child = SerializeFromObject(prunedSerializer, s.child))
}
}
}
/**
* Reassigns per-query unique IDs to `LambdaVariable`s, whose original IDs are globally unique. This
* can help Spark to hit codegen cache more often and improve performance.
*/
object ReassignLambdaVariableID extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = {
// The original LambdaVariable IDs are all positive. To avoid conflicts, the new IDs are all
// negative and starts from -1.
var newId = 0L
val oldIdToNewId = scala.collection.mutable.Map.empty[Long, Long]
// The `LambdaVariable` IDs in a query should be all positive or negative. Otherwise it's a bug
// and we should fail earlier.
var hasNegativeIds = false
var hasPositiveIds = false
plan.transformAllExpressions {
case lr: LambdaVariable if lr.id == 0 =>
        throw new IllegalStateException("LambdaVariable should never have 0 as its ID.")
case lr: LambdaVariable if lr.id < 0 =>
hasNegativeIds = true
if (hasPositiveIds) {
throw new IllegalStateException(
"LambdaVariable IDs in a query should be all positive or negative.")
}
lr
case lr: LambdaVariable if lr.id > 0 =>
hasPositiveIds = true
if (hasNegativeIds) {
throw new IllegalStateException(
"LambdaVariable IDs in a query should be all positive or negative.")
}
if (oldIdToNewId.contains(lr.id)) {
// This `LambdaVariable` has appeared before, reuse the newly generated ID.
lr.copy(id = oldIdToNewId(lr.id))
} else {
// This is the first appearance of this `LambdaVariable`, generate a new ID.
newId -= 1
oldIdToNewId(lr.id) = newId
lr.copy(id = newId)
}
}
}
}
|
BryanCutler/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/objects.scala
|
Scala
|
apache-2.0
| 12,071 |
package io.flow.reference
import io.flow.reference.v0.models.Country
import scala.collection.mutable
object Countries extends Validation[Country] {
override val cache: Map[String, Country] = Map(
data.Countries.all.flatMap { c =>
Seq(
c.iso31662.toLowerCase -> c,
c.iso31663.toLowerCase -> c,
c.name.toLowerCase -> c
)
}: _*
)
override def singular = "country"
override def plural = "countries"
override def name(c: Country): String = c.name
/**
* Validation function that will turn two provided codes into either of:
*
* - 1 or more errors
* - A tuple of two country objects
*
* Example:
*
* validate2("country of origin", "CHN", "destination", "AUS") match {
* case Left(errors) => { }
* case Right((origin, destination)) => { }
* }
*/
def validate2(label1: String, code1: String, label2: String, code2: String): Either[Seq[String], (Country, Country)] = {
validateN(
Seq(
(label1, code1),
(label2, code2)
)
) match {
case Left(errors) => {
Left(errors)
}
case Right(countries) => {
countries.toList match {
case a :: b :: Nil => Right(a -> b)
case _ => sys.error(s"validateN should have returned 2 items but instead returned: ${countries.size}")
}
}
}
}
def validateN(labelsAndCodes: Seq[(String, String)]): Either[Seq[String], Seq[Country]] = {
val invalid = mutable.Map[String, mutable.ListBuffer[String]]()
val countries = mutable.ListBuffer[Country]()
labelsAndCodes.foreach { case (label, code) =>
Countries.find(code) match {
case None => {
invalid.get(label) match {
case None => {
val tmp = mutable.ListBuffer[String]()
tmp += code
invalid += (label -> tmp)
}
case Some(values) => {
values += code
}
}
}
case Some(c) => {
countries.append(c)
}
}
}
val messages: Seq[String] = invalid.map { case (label, codes) =>
if (codes.size == 1) {
s"Invalid $label ${codes.mkString("'", "', '", "'")}. Must be a valid ISO 3166-2 or 3166-3 digit code. See https://api.flow.io/reference/countries"
} else {
s"Invalid $label ${codes.mkString("'", "', '", "'")}. Must be valid ISO 3166-2 or 3166-3 digit codes. See https://api.flow.io/reference/countries"
}
}.toSeq
messages.toList match {
case Nil => Right(countries.toSeq)
case errors => Left(errors)
}
}
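  // Hedged usage sketch for validateN (labels and codes are illustrative only):
  //
  //   Countries.validateN(Seq("country of origin" -> "CHN", "destination" -> "AUS")) match {
  //     case Left(errors)     => // one message per label listing its invalid codes
  //     case Right(countries) => // countries in the same order as the input pairs
  //   }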
}
|
flowcommerce/lib-reference-scala
|
src/main/scala/io/flow/reference/Countries.scala
|
Scala
|
mit
| 2,667 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.torch.TH
import com.intel.analytics.bigdl.utils.Engine
import scala.util.Random
@com.intel.analytics.bigdl.tags.Parallel
class LogSoftMaxSpec extends FlatSpec with Matchers with BeforeAndAfter {
before {
Engine.setNodeAndCore(1, 4)
}
"A LogSoftMax Module" should " be fast using MKL" in {
val layer = LogSoftMax[Float]()
layer.clearState()
val batchSize = 20
val input = Tensor[Float](batchSize, 10000)
val gradOutput = Tensor[Float](batchSize, 10000)
var startTime = System.nanoTime()
var duration = (System.nanoTime() - startTime) / 1e9
var sum = 0.0
for (i <- 1 to 5) {
layer.forward(input)
layer.backward(input, gradOutput)
}
for (i <- 1 to 5) {
startTime = System.nanoTime()
layer.forward(input)
layer.backward(input, gradOutput)
duration = (System.nanoTime() - startTime) / 1e9
println(s"speed: = ${duration} seconds")
sum += duration
}
println(s"avg speed: = ${sum / 5}")
layer.clearState()
}
"A LogSoftMax Module " should "generate correct output" in {
val module = new LogSoftMax[Double]()
val input = Tensor[Double](2)
input(Array(1)) = 0.1274271844660194
input(Array(2)) = 0.6225728155339806
val expectedOutput = Tensor[Double](2)
expectedOutput(Array(1)) = -0.9710581069556
expectedOutput(Array(2)) = -0.47591247588764
val output = module.forward(input)
output should be(expectedOutput)
}
"A LogSoftMax Module " should "generate correct output and grad" in {
val module = new LogSoftMax[Double]()
val input = Tensor[Double](3, 3)
input(Array(1, 1)) = 0.33655226649716
input(Array(1, 2)) = 0.77367000770755
input(Array(1, 3)) = 0.031494265655056
input(Array(2, 1)) = 0.11129087698646
input(Array(2, 2)) = 0.14688249188475
input(Array(2, 3)) = 0.49454387230799
input(Array(3, 1)) = 0.45682632108219
input(Array(3, 2)) = 0.85653987620026
input(Array(3, 3)) = 0.42569971177727
val gradOutput = Tensor[Double](3, 3)
gradOutput(Array(1, 1)) = 0.56766371615231
gradOutput(Array(1, 2)) = 0.55222836649045
gradOutput(Array(1, 3)) = 0.47152533312328
gradOutput(Array(2, 1)) = 0.27471435652114
gradOutput(Array(2, 2)) = 0.65794085455127
gradOutput(Array(2, 3)) = 0.6130160340108
gradOutput(Array(3, 1)) = 0.054757355013862
gradOutput(Array(3, 2)) = 0.93723741802387
gradOutput(Array(3, 3)) = 0.45930492319167
val expectedOutput = Tensor[Double](3, 3)
expectedOutput(Array(1, 1)) = -1.1894637490911
expectedOutput(Array(1, 2)) = -0.75234600788072
expectedOutput(Array(1, 3)) = -1.4945217499332
expectedOutput(Array(2, 1)) = -1.2537001628522
expectedOutput(Array(2, 2)) = -1.2181085479539
expectedOutput(Array(2, 3)) = -0.87044716753068
expectedOutput(Array(3, 1)) = -1.2414854064608
expectedOutput(Array(3, 2)) = -0.84177185134272
expectedOutput(Array(3, 3)) = -1.2726120157657
val expectedGrad = Tensor[Double](3, 3)
expectedGrad(Array(1, 1)) = 0.083261006513078
expectedGrad(Array(1, 2)) = -0.19774248918721
expectedGrad(Array(1, 3)) = 0.11448148267413
expectedGrad(Array(2, 1)) = -0.166492308996
expectedGrad(Array(2, 2)) = 0.20074813405794
expectedGrad(Array(2, 3)) = -0.034255825061936
expectedGrad(Array(3, 1)) = -0.36460248987794
expectedGrad(Array(3, 2)) = 0.3118052217279
expectedGrad(Array(3, 3)) = 0.052797268150042
val inputOrg = input.clone()
val gradOutputOrg = gradOutput.clone()
val output = module.forward(input)
val gradInput = module.backward(input, gradOutput)
output should be(expectedOutput)
gradInput should be(expectedGrad)
input should be(inputOrg)
gradOutput should be(gradOutputOrg)
}
"LogSoftMax float module" should "won't return Infinity when input is bigger than 89" in {
val module = new LogSoftMax[Float]()
Random.setSeed(100)
val input = Tensor[Float](2, 5).apply1(e => Random.nextFloat() + 90)
val output = module.forward(input).toTensor[Float]
output.apply1(v => {v.isInfinity should be (false); v})
}
}
|
jenniew/BigDL
|
spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/LogSoftMaxSpec.scala
|
Scala
|
apache-2.0
| 4,906 |
package org.bitcoins.wallet
import org.bitcoins.core.api.dlc.wallet.AnyDLCHDWalletApi
import org.bitcoins.core.api.wallet.db.AccountDb
import org.bitcoins.core.hd.AddressType
import org.bitcoins.core.protocol.BitcoinAddress
import scala.concurrent.Future
/** ScalaMock cannot stub traits with protected methods,
* so we need to stub them manually.
*/
abstract class MockWalletApi extends AnyDLCHDWalletApi {
override def getNewChangeAddress(account: AccountDb): Future[BitcoinAddress] =
stub
override def getDefaultAccount(): Future[AccountDb] = stub
override def getDefaultAccountForType(
addressType: AddressType): Future[AccountDb] = stub
private def stub[T] =
Future.failed[T](new RuntimeException("Not implemented"))
}
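// Hedged usage sketch (test wiring is illustrative ScalaMock usage, not from this file):
//
//   val wallet = mock[MockWalletApi]   // inside a ScalaMock-enabled test suite
//
// The methods stubbed above already return failed futures, so only the public methods a
// given test exercises need explicit expectations.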
|
bitcoin-s/bitcoin-s
|
app/server-test/src/test/scala/org/bitcoins/wallet/MockWalletApi.scala
|
Scala
|
mit
| 758 |
package uk.zebington.junkcraft.handlers
import net.minecraftforge.fml.common.network.NetworkRegistry
import net.minecraftforge.fml.relauncher.Side
import uk.zebington.junkcraft._
import uk.zebington.junkcraft.messages.MessageSpikeStation
/**
* Created by Charlotte on 22/02/2015.
*/
object JCPacketHandler {
val Instance = NetworkRegistry.INSTANCE.newSimpleChannel(Id)
def init() {
Instance.registerMessage(classOf[MessageSpikeStation], classOf[MessageSpikeStation], 0, Side.CLIENT)
Instance.registerMessage(classOf[MessageSpikeStation], classOf[MessageSpikeStation], 1, Side.SERVER)
}
}
|
zebington/JunkCraft
|
src/main/scala/uk/zebington/junkcraft/handlers/JCPacketHandler.scala
|
Scala
|
gpl-3.0
| 607 |
package is.hail.types.physical
import is.hail.types.virtual.TStream
abstract class PStream extends PIterable with PUnrealizable {
lazy val virtualType: TStream = TStream(elementType.virtualType)
def separateRegions: Boolean
def _asIdent = s"stream_of_${elementType.asIdent}"
}
abstract class PStreamCode extends PCode with PUnrealizableCode {
def pt: PStream
}
|
cseed/hail
|
hail/src/main/scala/is/hail/types/physical/PStream.scala
|
Scala
|
mit
| 374 |