Dataset columns:
  code       string (lengths 5 to 1M)
  repo_name  string (lengths 5 to 109)
  path       string (lengths 6 to 208)
  language   string (1 class)
  license    string (15 classes)
  size       int64 (5 to 1M)
package com.thoughtworks.deeplearning.plugins

/** A plugin that automatically names [[Layer]]s and [[Weight]]s.
  *
  * @author 杨博 (Yang Bo)
  */
trait Names {

  trait DifferentiableApi {
    def fullName: sourcecode.FullName
    def name: sourcecode.Name
    override def toString: String = {
      raw"""Weight[fullName=${fullName.value}]"""
    }
  }
  type Differentiable <: DifferentiableApi
}
repo_name: ThoughtWorksInc/DeepLearning.scala
path: plugins-Names/src/main/scala/com/thoughtworks/deeplearning/plugins/Names.scala
language: Scala
license: apache-2.0
size: 408
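Note: the trait above leans on Li Haoyi's sourcecode macros (sourcecode.Name, sourcecode.FullName) to capture definition names at the use site. A minimal, self-contained sketch of that capture, not part of the dataset row; FakeWeight is hypothetical and only the com.lihaoyi sourcecode dependency is assumed:

object NamesSketch {
  // Hypothetical stand-in for a Weight: the implicit sourcecode parameters are
  // filled in by macros at the point where the instance is defined.
  class FakeWeight(implicit val fullName: sourcecode.FullName, val name: sourcecode.Name) {
    override def toString: String = raw"""Weight[fullName=${fullName.value}]"""
  }

  val myWeight = new FakeWeight // fullName.value == "NamesSketch.myWeight", name.value == "myWeight"

  def main(args: Array[String]): Unit =
    println(myWeight) // prints Weight[fullName=NamesSketch.myWeight]
}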
package pl.writeonly.son2.vaadin.util

object JavaFunctions {
  implicit def scalaFunctionToJavaFunction[From, To](function: (From) => To): java.util.function.Function[From, To] = {
    new java.util.function.Function[From, To] {
      override def apply(input: From): To = function(input)
    }
  }
}
repo_name: writeonly/scalare
path: scalare-adin/src/main/scala/pl/writeonly/son2/vaadin/util/JavaFunctions.scala
language: Scala
license: artistic-2.0
size: 303
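Usage sketch for the implicit view above (hypothetical object name, standard library only): assigning a Scala function value where a java.util.function.Function is expected triggers the conversion.

import pl.writeonly.son2.vaadin.util.JavaFunctions._

object JavaFunctionsUsage {
  def main(args: Array[String]): Unit = {
    val length: String => Int = _.length
    // The implicit view wraps the Scala function value in a java.util.function.Function
    val javaFn: java.util.function.Function[String, Int] = length
    println(javaFn.apply("hello")) // prints 5
  }
}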
package com.youdevise.albatross

import org.specs2.Specification

import Bounds._

class BoundSpec extends Specification {

  def is =
    openUpperBound ^
    closedUpperBound ^
    openLowerBound ^
    closedLowerBound ^
    leastUpperBound ^
    greatestLowerBound ^
    leastLowerBound ^
    greatestUpperBound ^
    end

  def openUpperBound =
    "An open upper bound" ^
      "Is open" ! { OpenUpperBound(100).isOpen must beTrue } ^
      "Is upper" ! { OpenUpperBound(100).isUpper must beTrue } ^
      "Encloses any value less than its endpoint" ! { OpenUpperBound(100).encloses(99) must beTrue } ^
      "Does not enclose its endpoint" ! { OpenUpperBound(100).encloses(100) must beFalse } ^
      "Does not enclose any value greater than its endpoint" ! { OpenUpperBound(100).encloses(101) must beFalse } ^
    end

  def closedUpperBound =
    "A closed upper bound" ^
      "Is closed" ! { ClosedUpperBound(100).isClosed must beTrue } ^
      "Is upper" ! { ClosedUpperBound(100).isUpper must beTrue } ^
      "Encloses any value less than its endpoint" ! { ClosedUpperBound(100).encloses(99) must beTrue } ^
      "Encloses its endpoint" ! { ClosedUpperBound(100).encloses(100) must beTrue } ^
      "Does not enclose any value greater than its endpoint" ! { ClosedUpperBound(100).encloses(101) must beFalse } ^
    end

  def openLowerBound =
    "An open lower bound" ^
      "Is open" ! { OpenLowerBound(100).isOpen must beTrue } ^
      "Is lower" ! { OpenLowerBound(100).isLower must beTrue } ^
      "Encloses any value greater than its endpoint" ! { OpenLowerBound(100).encloses(101) must beTrue } ^
      "Does not enclose its endpoint" ! { OpenLowerBound(100).encloses(100) must beFalse } ^
      "Does not enclose any value less than its endpoint" ! { OpenLowerBound(100).encloses(99) must beFalse } ^
    end

  def closedLowerBound =
    "A closed lower bound" ^
      "Is closed" ! { ClosedLowerBound(100).isClosed must beTrue } ^
      "Is lower" ! { ClosedLowerBound(100).isLower must beTrue } ^
      "Encloses any value greater than its endpoint" ! { ClosedLowerBound(100).encloses(101) must beTrue } ^
      "Encloses its endpoint" ! { ClosedLowerBound(100).encloses(100) must beTrue } ^
      "Does not enclose any value less than its endpoint" ! { ClosedLowerBound(100).encloses(99) must beFalse } ^
    end

  def leastUpperBound =
    "The least upper bound" ^
      "Of a closed and an open upper bound with the same endpoint is the open upper bound" ! { leastUpper(Some(ClosedUpperBound(0)), Some(OpenUpperBound(0))) must beSome(OpenUpperBound(0)) } ^
      "Of two closed bounds is the bound with the lowest endpoint" ! { leastUpper(Some(ClosedUpperBound(0)), Some(ClosedUpperBound(-1))) must beSome(ClosedUpperBound(-1)) } ^
      "Of two open bounds is the bound with the lowest endpoint" ! { leastUpper(Some(OpenUpperBound(0)), Some(OpenUpperBound(-1))) must beSome(OpenUpperBound(-1)) } ^
      "Of any bound and no bound is the determinate bound" ! { leastUpper(Some(OpenUpperBound(0)), None) must beSome(OpenUpperBound(0)) } ^
    end

  def greatestLowerBound =
    "The greatest lower bound" ^
      "Of a closed and an open lower bound with the same endpoint is the open lower bound" ! { greatestLower(Some(ClosedLowerBound(0)), Some(OpenLowerBound(0))) must beSome(OpenLowerBound(0)) } ^
      "Of two closed bounds is the bound with the highest endpoint" ! { greatestLower(Some(ClosedLowerBound(0)), Some(ClosedLowerBound(-1))) must beSome(ClosedLowerBound(0)) } ^
      "Of two open bounds is the bound with the highest endpoint" ! { greatestLower(Some(OpenLowerBound(0)), Some(OpenLowerBound(-1))) must beSome(OpenLowerBound(0)) } ^
      "Of any bound and no bound is the determinate bound" ! { greatestLower(Some(OpenLowerBound(0)), None) must beSome(OpenLowerBound(0)) } ^
    end

  def leastLowerBound =
    "The least lower bound" ^
      "Of a closed and an open lower bound with the same endpoint is the closed lower bound" ! { leastLower(Some(ClosedLowerBound(0)), Some(ClosedLowerBound(0))) must beSome(ClosedLowerBound(0)) } ^
      "Of two closed bounds is the bound with the lowest endpoint" ! { leastLower(Some(ClosedLowerBound(0)), Some(ClosedLowerBound(-1))) must beSome(ClosedLowerBound(-1)) } ^
      "Of two open bounds is the bound with the lowest endpoint" ! { leastLower(Some(OpenLowerBound(0)), Some(OpenLowerBound(-1))) must beSome(OpenLowerBound(-1)) } ^
      "Of any bound and no bound is no bound" ! { leastLower(Some(OpenLowerBound(0)), None) must beNone } ^
    end

  def greatestUpperBound =
    "The greatest upper bound" ^
      "Of a closed and an open upper bound with the same endpoint is the closed upper bound" ! { greatestUpper(Some(ClosedUpperBound(0)), Some(OpenUpperBound(0))) must beSome(ClosedUpperBound(0)) } ^
      "Of two closed bounds is the bound with the highest endpoint" ! { greatestUpper(Some(ClosedUpperBound(0)), Some(ClosedUpperBound(-1))) must beSome(ClosedUpperBound(0)) } ^
      "Of two open bounds is the bound with the highest endpoint" ! { greatestUpper(Some(OpenUpperBound(0)), Some(OpenUpperBound(-1))) must beSome(OpenUpperBound(0)) } ^
      "Of any bound and no bound is no bound" ! { greatestUpper(Some(OpenUpperBound(0)), None) must beNone } ^
    end
}
repo_name: tim-group/albatross
path: src/test/scala/BoundSpec.scala
language: Scala
license: mit
size: 5,574
/*
 * Copyright 2017 TWO SIGMA OPEN SOURCE, LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.twosigma.beakerx.scala.chart.xychart.plotitem

import com.twosigma.beakerx.chart.Color
import com.twosigma.beakerx.scala.JavaAdapter._

import scala.collection.JavaConverters._

class Area extends com.twosigma.beakerx.chart.xychart.plotitem.Area with AreaProperties

trait AreaProperties extends BasedXYGraphicsProperties {
  this: com.twosigma.beakerx.chart.xychart.plotitem.Area =>

  def interpolation = safeOption(getInterpolation)
  def interpolation_=(i: Int) = setInterpolation(i)
}
repo_name: jpallas/beakerx
path: kernel/scala/src/main/scala/com/twosigma/beakerx/scala/chart/xychart/plotitem/Area.scala
language: Scala
license: apache-2.0
size: 1,126
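The one call above that is not defined in this row is safeOption from JavaAdapter. A hedged sketch of its assumed behavior, null-safe wrapping of a Java getter result into an Option, not the actual BeakerX implementation:

object JavaAdapterSketch {
  // Assumed behavior only: wrap a possibly-null Java value in an Option
  def safeOption[A](value: => A): Option[A] = Option(value)

  def main(args: Array[String]): Unit = {
    val unset: java.lang.Integer = null
    println(safeOption(unset))                         // None
    println(safeOption(java.lang.Integer.valueOf(3)))  // Some(3)
  }
}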
/*
 * Copyright 2001-2013 Artima, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.scalatest

import org.scalatest.prop.Tables
import org.scalatest.junit.JUnit3Suite
import org.scalatest.junit.JUnitSuite
import org.scalatest.testng.TestNGSuite

trait SuiteExamples extends Tables {

  type FixtureServices

  def suite: Suite with FixtureServices
  def fixtureSuite: fixture.Suite with FixtureServices
  def junit3Suite: JUnit3Suite with FixtureServices
  def junitSuite: JUnitSuite with FixtureServices
  def testngSuite: TestNGSuite with FixtureServices
  def funSuite: FunSuite with FixtureServices
  def fixtureFunSuite: fixture.FunSuite with FixtureServices
  def funSpec: FunSpec with FixtureServices
  def fixtureFunSpec: fixture.FunSpec with FixtureServices
  def featureSpec: FeatureSpec with FixtureServices
  def fixtureFeatureSpec: fixture.FeatureSpec with FixtureServices
  def flatSpec: FlatSpec with FixtureServices
  def fixtureFlatSpec: fixture.FlatSpec with FixtureServices
  def freeSpec: FreeSpec with FixtureServices
  def fixtureFreeSpec: fixture.FreeSpec with FixtureServices
  def propSpec: PropSpec with FixtureServices
  def fixturePropSpec: fixture.PropSpec with FixtureServices
  def wordSpec: WordSpec with FixtureServices
  def fixtureWordSpec: fixture.WordSpec with FixtureServices
  def spec: Spec with FixtureServices
  def fixtureSpec: fixture.Spec with FixtureServices

  def examples =
    Table(
      "suite",
      suite,
      fixtureSuite,
      junit3Suite,
      junitSuite,
      testngSuite,
      funSuite,
      fixtureFunSuite,
      funSpec,
      fixtureFunSpec,
      featureSpec,
      fixtureFeatureSpec,
      flatSpec,
      fixtureFlatSpec,
      freeSpec,
      fixtureFreeSpec,
      propSpec,
      fixturePropSpec,
      wordSpec,
      fixtureWordSpec,
      spec,
      fixtureSpec
    )
}
repo_name: travisbrown/scalatest
path: src/test/scala/org/scalatest/SuiteExamples.scala
language: Scala
license: apache-2.0
size: 2,384
package services

import securesocial.core.RuntimeEnvironment
import securesocial.core.services.UserService
import securesocial.core.providers._

import scala.collection.immutable.ListMap

/**
 * see https://github.com/jaliss/securesocial/blob/master/module-code/app/securesocial/core/RuntimeEnvironment.scala
 */
class MyEnvironment extends RuntimeEnvironment.Default[User] {
  override val userService: UserService[User] = new MyUserService()
  override lazy val providers = ListMap(
    include(new TwitterProvider(routes, cacheService, oauth1ClientFor(TwitterProvider.Twitter))),
    include(new FacebookProvider(routes, cacheService, oauth2ClientFor(FacebookProvider.Facebook))),
    include(new GoogleProvider(routes, cacheService, oauth2ClientFor(GoogleProvider.Google)))
  )
}
repo_name: janih/play-java-securesocial
path: app/services/MyEnvironment.scala
language: Scala
license: apache-2.0
size: 778
object HelloWorld {
  def main(args: Array[String]) = println("Hello World!")
}
repo_name: Artie18/remembering_scala
path: hello_world/src/main/scala/HelloWorld.scala
language: Scala
license: mpl-2.0
size: 84
/*
 * Copyright (C) Lightbend Inc. <https://www.lightbend.com>
 */
package com.lightbend.lagom.internal.cluster

import akka.actor.ActorRef
import akka.actor.Address
import akka.cluster.Cluster
import akka.cluster.MemberStatus
import akka.remote.testconductor.RoleName
import akka.remote.testkit.MultiNodeSpec
import akka.testkit.ImplicitSender
import com.lightbend.lagom.internal.cluster.ClusterMultiNodeConfig.node1

import scala.concurrent.duration._

abstract class ClusteredMultiNodeUtils(val numOfNodes: Int, multiNodeConfig: ClusterMultiNodeConfig)
    extends MultiNodeSpec(multiNodeConfig, ClusterMultiNodeActorSystemFactory.createActorSystem())
    with STMultiNodeSpec
    with ImplicitSender {

  override def initialParticipants: Int = roles.size

  def join(from: RoleName, to: RoleName): Unit = {
    runOn(from) {
      Cluster(system).join(node(to).address)
    }
    enterBarrier(from.name + "-joined")
  }

  def fullAddress(ref: ActorRef): Address =
    if (ref.path.address.hasLocalScope) Cluster(system).selfAddress
    else ref.path.address

  protected override def atStartup(): Unit = {
    roles.foreach(n => join(n, node1))
    within(15.seconds) {
      awaitAssert(Cluster(system).state.members.size should be(numOfNodes))
      awaitAssert(
        Cluster(system).state.members.toIndexedSeq.map(_.status).distinct should be(IndexedSeq(MemberStatus.Up))
      )
    }
    enterBarrier("startup")
  }
}
repo_name: lagom/lagom
path: cluster/core/src/multi-jvm/scala/com/lightbend/lagom/internal/cluster/ClusteredMultiNodeUtils.scala
language: Scala
license: apache-2.0
size: 1,433
package com.github.agourlay.cornichon.steps.wrapped

import java.util.concurrent.atomic.AtomicReference
import java.util.function.UnaryOperator

import com.github.agourlay.cornichon.core._
import com.github.agourlay.cornichon.steps.cats.EffectStep
import com.github.agourlay.cornichon.testHelpers.CommonTestSuite

import munit.FunSuite

class ScenarioResourceStepSpec extends FunSuite with CommonTestSuite {

  import QueueManager._

  test("acquire a resource and release it before the end of the run even if something blows up in the middle") {
    implicit val queueResource: QueueManager = new QueueManager
    val resourceStep = ScenarioResourceStep(
      "ensure queue exists",
      createAndStoreQueueInSession("the-queue"),
      deleteQueue("the-queue")
    )
    val scenario = Scenario("", resourceStep :: brokenEffectStep :: Nil)

    val report = awaitIO(ScenarioRunner.runScenario(Session.newEmpty)(scenario))

    val qName = report.session.get("the-queue").valueUnsafe
    assert(queueResource.actionsFor(qName) == List(CreateQueue(qName), DeleteQueue(qName)))
    assert(report.logs.contains(InfoLogInstruction("cleanup steps", 2)))
    assert(report.logs.exists { case SuccessLogInstruction("delete the queue: the-queue", _, _) => true; case _ => false })
  }

  test("not run a ResourceStep if a previous step failed but should still clean up the resource steps that did run") {
    implicit val queueResource: QueueManager = new QueueManager
    val resourceStep1 = ScenarioResourceStep("ensure q1 exists", createAndStoreQueueInSession("q1"), deleteQueue("q1"))
    val resourceStep2 = ScenarioResourceStep("ensure q2 exists", createAndStoreQueueInSession("q2"), deleteQueue("q2"))
    val scenario = Scenario("resource step scenario", resourceStep1 :: brokenEffectStep :: resourceStep2 :: Nil)

    val rep = awaitIO(ScenarioRunner.runScenario(Session.newEmpty)(scenario))

    val q1 = rep.session.get("q1").valueUnsafe
    assert(queueResource.actionsFor(q1) == List(CreateQueue(q1), DeleteQueue(q1)))
  }

  test("runs all the clean up steps in order") {
    val is = List.range(1, 5)
    implicit val queueResource: QueueManager = new QueueManager
    val resourceSteps = is.map(i => ScenarioResourceStep(s"ensure q$i exists", createAndStoreQueueInSession(s"q$i"), deleteQueue(s"q$i")))
    val scenario = Scenario("resource step scenario", resourceSteps)

    val rep = awaitIO(ScenarioRunner.runScenario(Session.newEmpty)(scenario))

    def q(i: Int) = rep.session.get(s"q$i").valueUnsafe
    assert(queueResource.allActions == is.map(i => CreateQueue(q(i))) ++ is.reverseIterator.map(i => DeleteQueue(q(i))))
  }

  test("perform all the release steps even if one fails and report all the ones that failed") {
    implicit val queueResource: QueueManager = new QueueManager
    val resourceStep1 = ScenarioResourceStep("ensure q1 exists", createAndStoreQueueInSession("q1"), deleteQueue("q1"))
    val resourceStep2 = ScenarioResourceStep("ensure q2 exists", createAndStoreQueueInSession("q2"), failToDeleteQueue("q2"))
    val resourceStep3 = ScenarioResourceStep("ensure q3 exists", createAndStoreQueueInSession("q3"), failToDeleteQueue("q3"))
    val scenario = Scenario("resource step scenario", resourceStep1 :: resourceStep2 :: resourceStep3 :: Nil)

    val rep = awaitIO(ScenarioRunner.runScenario(Session.newEmpty)(scenario))

    val q1 = rep.session.get("q1").valueUnsafe
    assert(queueResource.actionsFor(q1) == List(CreateQueue(q1), DeleteQueue(q1)))
    scenarioFailsWithMessage(rep) {
      """Scenario 'resource step scenario' failed:
        |
        |at step:
        |fail to delete the queue: q2
        |
        |with error(s):
        |no queue for you
        |
        |and
        |
        |at step:
        |fail to delete the queue: q3
        |
        |with error(s):
        |no queue for you
        |
        |seed for the run was '1'
        |""".stripMargin
    }
  }

  private def createAndStoreQueueInSession(key: String)(implicit queueResource: QueueManager) =
    EffectStep.fromSyncE(
      s"create the queue: $key",
      sc => {
        val name = key + "-" + sc.randomContext.alphanumeric(10)
        queueResource.create(name)
        sc.session.addValue(key, name)
      }
    )

  private def deleteQueue(key: String)(implicit queueResource: QueueManager) =
    EffectStep.fromSync(
      s"delete the queue: $key",
      sc => {
        // To prove that steps are executed in sequence, it's useful to have them take varying amounts of time.
        // If they were being executed in parallel the order would be non-deterministic
        Thread.sleep(sc.randomContext.nextInt(50).toLong)
        queueResource.delete(sc.session.get(key).valueUnsafe)
        sc.session
      }
    )

  private def failToDeleteQueue(key: String) =
    EffectStep.fromSyncE(
      s"fail to delete the queue: $key",
      _ => Left(BasicError("no queue for you"))
    )

  class QueueManager {
    private val state = new AtomicReference[List[Action]](Nil)

    def create(name: String): Unit = {
      state.getAndUpdate(CreateQueue(name) :: (_: List[Action]))
      ()
    }

    def delete(name: String): Unit = {
      state.getAndUpdate(DeleteQueue(name) :: (_: List[Action]))
      ()
    }

    def allActions: List[Action] = state.get().reverse

    def actionsFor(name: String): List[Action] = allActions.collect {
      case a @ CreateQueue(`name`) => a
      case a @ DeleteQueue(`name`) => a
    }
  }

  object QueueManager {
    sealed trait Action
    case class CreateQueue(name: String) extends Action
    case class DeleteQueue(name: String) extends Action

    implicit def fnToUnaryOp[A](f: A => A): UnaryOperator[A] = new UnaryOperator[A] {
      def apply(t: A): A = f(t)
    }
  }
}
repo_name: agourlay/cornichon
path: cornichon-core/src/test/scala/com/github/agourlay/cornichon/steps/wrapped/ScenarioResourceStepSpec.scala
language: Scala
license: apache-2.0
size: 5,827
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.planner.plan.stream.sql import org.apache.flink.api.scala._ import org.apache.flink.table.api._ import org.apache.flink.table.api.internal.TableEnvironmentInternal import org.apache.flink.table.planner.plan.optimize.RelNodeBlockPlanBuilder import org.apache.flink.table.planner.runtime.utils.JavaUserDefinedScalarFunctions.NonDeterministicUdf import org.apache.flink.table.planner.utils.{TableFunc1, TableTestBase} import org.apache.flink.table.types.logical.{BigIntType, IntType, VarCharType} import org.junit.Test class DagOptimizationTest extends TableTestBase { private val util = streamTestUtil() util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c) util.addTableSource[(Int, Long, String)]("MyTable1", 'd, 'e, 'f) val STRING = new VarCharType(VarCharType.MAX_LENGTH) val LONG = new BigIntType() val INT = new IntType() @Test def testSingleSink1(): Unit = { val table = util.tableEnv.sqlQuery("SELECT c, COUNT(a) AS cnt FROM MyTable GROUP BY c") val retractSink = util.createRetractTableSink(Array("c", "cnt"), Array(STRING, LONG)) util.verifyPlanInsert(table, retractSink, "retractSink", ExplainDetail.CHANGELOG_MODE) } @Test def testSingleSink2(): Unit = { val table1 = util.tableEnv.sqlQuery("SELECT a as a1, b FROM MyTable WHERE a <= 10") util.tableEnv.registerTable("table1", table1) val table2 = util.tableEnv.sqlQuery("SELECT a, b, c FROM MyTable WHERE a >= 0") util.tableEnv.registerTable("table2", table2) val table3 = util.tableEnv.sqlQuery("SELECT a AS a2, c FROM table2 WHERE b >= 5") util.tableEnv.registerTable("table3", table3) val table4 = util.tableEnv.sqlQuery("SELECT a AS a3, c as c1 FROM table2 WHERE b < 5") util.tableEnv.registerTable("table4", table4) val table5 = util.tableEnv.sqlQuery("SELECT a1, b, c as c2 FROM table1, table3 WHERE a1 = a2") util.tableEnv.registerTable("table5", table5) val table6 = util.tableEnv.sqlQuery("SELECT a1, b, c1 FROM table4, table5 WHERE a1 = a3") val appendSink = util.createAppendTableSink(Array("a1", "b", "c1"), Array(INT, LONG, STRING)) util.verifyPlanInsert(table6, appendSink, "appendSink", ExplainDetail.CHANGELOG_MODE) } @Test def testSingleSink3(): Unit = { util.addDataStream[(Int, Long, String, Double, Boolean)]("MyTable2", 'a, 'b, 'c, 'd, 'e) val table1 = util.tableEnv.sqlQuery("SELECT a AS a1, b as b1 FROM MyTable WHERE a <= 10") util.tableEnv.registerTable("table1", table1) val table2 = util.tableEnv.sqlQuery("SELECT a, b1 FROM table1, MyTable2 WHERE a = a1") util.tableEnv.registerTable("table2", table2) val table3 = util.tableEnv.sqlQuery("SELECT * FROM table1 UNION ALL SELECT * FROM table2") val appendSink = util.createAppendTableSink(Array("a1", "b1"), Array(INT, LONG)) util.verifyPlanInsert(table3, appendSink, "appendSink", 
ExplainDetail.CHANGELOG_MODE) } @Test def testSingleSink4(): Unit = { val table1 = util.tableEnv.sqlQuery("SELECT a as a1, b FROM MyTable WHERE a <= 10") util.tableEnv.registerTable("table1", table1) val table2 = util.tableEnv.sqlQuery("SELECT a, b, c FROM MyTable WHERE a >= 0") util.tableEnv.registerTable("table2", table2) val table3 = util.tableEnv.sqlQuery("SELECT a AS a2, c FROM table2 WHERE b >= 5") util.tableEnv.registerTable("table3", table3) val table4 = util.tableEnv.sqlQuery("SELECT a AS a3, c AS c1 FROM table2 WHERE b < 5") util.tableEnv.registerTable("table4", table4) val table5 = util.tableEnv.sqlQuery("SELECT a1, b, c AS c2 from table1, table3 WHERE a1 = a2") util.tableEnv.registerTable("table5", table5) val table6 = util.tableEnv.sqlQuery("SELECT a3, b as b1, c1 FROM table4, table5 WHERE a1 = a3") util.tableEnv.registerTable("table6", table6) val table7 = util.tableEnv.sqlQuery("SELECT a1, b1, c1 FROM table1, table6 WHERE a1 = a3") val appendSink = util.createAppendTableSink(Array("a", "b", "c"), Array(INT, LONG, STRING)) util.verifyPlanInsert(table7, appendSink, "appendSink", ExplainDetail.CHANGELOG_MODE) } @Test def testSingleSinkWithUDTF(): Unit = { util.addTableSource[(Int, Long, Int, String, Long)]("MyTable2", 'i, 'j, 'k, 'l, 'm) util.addFunction("split", new TableFunc1) val sqlQuery = """ |select * from | (SELECT * FROM MyTable, MyTable1, MyTable2 WHERE b = e AND a = i) t, | LATERAL TABLE(split(c)) as T(s) """.stripMargin val table = util.tableEnv.sqlQuery(sqlQuery) val appendSink = util.createAppendTableSink( Array("a", "b", "c", "d", "e", "f", "i", "j", "k", "l", "m", "s"), Array(INT, LONG, STRING, INT, LONG, STRING, INT, LONG, INT, STRING, LONG, STRING)) util.verifyPlanInsert(table, appendSink, "appendSink", ExplainDetail.CHANGELOG_MODE) } @Test def testSingleSinkSplitOnUnion(): Unit = { util.tableEnv.getConfig.getConfiguration.setBoolean( RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_DISABLED, true) val sqlQuery = "SELECT SUM(a) AS total_sum FROM " + "(SELECT a, c FROM MyTable UNION ALL SELECT d, f FROM MyTable1)" val table = util.tableEnv.sqlQuery(sqlQuery) val retractSink = util.createRetractTableSink(Array("total_sum"), Array(INT)) util.verifyPlanInsert(table, retractSink, "retractSink", ExplainDetail.CHANGELOG_MODE) } @Test def testMultiSinks1(): Unit = { val stmtSet = util.tableEnv.createStatementSet() util.tableEnv.getConfig.getConfiguration.setBoolean( RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_REUSE_OPTIMIZE_BLOCK_WITH_DIGEST_ENABLED, true) val table1 = util.tableEnv.sqlQuery("SELECT SUM(a) AS sum_a, c FROM MyTable GROUP BY c") util.tableEnv.registerTable("table1", table1) val table2 = util.tableEnv.sqlQuery("SELECT SUM(sum_a) AS total_sum FROM table1") val table3 = util.tableEnv.sqlQuery("SELECT MIN(sum_a) AS total_min FROM table1") val retractSink1 = util.createRetractTableSink(Array("total_sum"), Array(INT)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "retractSink1", retractSink1) stmtSet.addInsert("retractSink1", table2) val retractSink2 = util.createRetractTableSink(Array("total_min"), Array(INT)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "retractSink2", retractSink2) stmtSet.addInsert("retractSink2", table3) util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE) } @Test def testMultiSinks2(): Unit = { val stmtSet = util.tableEnv.createStatementSet() util.tableEnv.getConfig.getConfiguration.setBoolean( 
RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_DISABLED, false) util.addTableSource[(Int, Long, String, Double, Boolean)]("MyTable2", 'a, 'b, 'c, 'd, 'e) val table1 = util.tableEnv.sqlQuery("SELECT a as a1, b as b1 FROM MyTable WHERE a <= 10") util.tableEnv.registerTable("table1", table1) val table2 = util.tableEnv.sqlQuery("SELECT a, b1 from table1, MyTable2 where a = a1") util.tableEnv.registerTable("table2", table2) val table3 = util.tableEnv.sqlQuery("SELECT * FROM table1 UNION ALL SELECT * FROM table2") val appendSink1 = util.createAppendTableSink(Array("a", "b1"), Array(INT, LONG)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "appendSink1", appendSink1) stmtSet.addInsert("appendSink1", table3) val appendSink2 = util.createAppendTableSink(Array("a", "b1"), Array(INT, LONG)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "appendSink2", appendSink2) stmtSet.addInsert("appendSink2", table3) util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE) } @Test def testMultiSinks3(): Unit = { val stmtSet = util.tableEnv.createStatementSet() util.tableEnv.getConfig.getConfiguration.setBoolean( RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_DISABLED, false) util.addTableSource[(Int, Long, String, Double, Boolean)]("MyTable2", 'a, 'b, 'c, 'd, 'e) val table1 = util.tableEnv.sqlQuery("SELECT a AS a1, b AS b1 FROM MyTable WHERE a <= 10") util.tableEnv.registerTable("table1", table1) val table2 = util.tableEnv.sqlQuery("SELECT a, b1 FROM table1, MyTable2 WHERE a = a1") util.tableEnv.registerTable("table2", table2) val table3 = util.tableEnv.sqlQuery("SELECT * FROM table1 UNION ALL SELECT * FROM table2") val appendSink1 = util.createAppendTableSink(Array("a", "b1"), Array(INT, LONG)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "appendSink1", appendSink1) stmtSet.addInsert("appendSink1", table2) val appendSink2 = util.createAppendTableSink(Array("a", "b1"), Array(INT, LONG)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "appendSink2", appendSink2) stmtSet.addInsert("appendSink2", table3) util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE) } @Test def testMultiSinks4(): Unit = { val stmtSet = util.tableEnv.createStatementSet() val table1 = util.tableEnv.sqlQuery("SELECT a as a1, b FROM MyTable WHERE a <= 10") util.tableEnv.registerTable("table1", table1) val table2 = util.tableEnv.sqlQuery("SELECT a, b, c FROM MyTable WHERE a >= 0") util.tableEnv.registerTable("table2", table2) val table3 = util.tableEnv.sqlQuery("SELECT a as a2, c FROM table2 WHERE b >= 5") util.tableEnv.registerTable("table3", table3) val table4 = util.tableEnv.sqlQuery("SELECT a as a3, c as c1 FROM table2 WHERE b < 5") util.tableEnv.registerTable("table4", table4) val table5 = util.tableEnv.sqlQuery("SELECT a1, b, c as c2 FROM table1, table3 WHERE a1 = a2") util.tableEnv.registerTable("table5", table5) val table6 = util.tableEnv.sqlQuery("SELECT a1, b, c1 FROM table4, table5 WHERE a1 = a3") val appendSink1 = util.createAppendTableSink(Array("a1", "b", "c2"), Array(INT, LONG, STRING)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "appendSink1", appendSink1) stmtSet.addInsert("appendSink1", table5) val appendSink2 = util.createAppendTableSink(Array("a1", "b", "c1"), Array(INT, LONG, STRING)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "appendSink2", appendSink2) stmtSet.addInsert("appendSink2", 
table6) util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE) } @Test def testMultiSinks5(): Unit = { val stmtSet = util.tableEnv.createStatementSet() util.tableEnv.getConfig.getConfiguration.setBoolean( RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_REUSE_OPTIMIZE_BLOCK_WITH_DIGEST_ENABLED, true) // test with non-deterministic udf util.tableEnv.registerFunction("random_udf", new NonDeterministicUdf()) val table1 = util.tableEnv.sqlQuery("SELECT random_udf(a) AS a, c FROM MyTable") util.tableEnv.registerTable("table1", table1) val table2 = util.tableEnv.sqlQuery("SELECT SUM(a) AS total_sum FROM table1") val table3 = util.tableEnv.sqlQuery("SELECT MIN(a) AS total_min FROM table1") val retractSink1 = util.createRetractTableSink(Array("total_sum"), Array(INT)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "retractSink1", retractSink1) stmtSet.addInsert("retractSink1", table2) val retractSink2 = util.createRetractTableSink(Array("total_min"), Array(INT)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "retractSink2", retractSink2) stmtSet.addInsert("retractSink2", table3) util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE) } @Test def testMultiSinksWithUDTF(): Unit = { val stmtSet = util.tableEnv.createStatementSet() util.tableEnv.getConfig.getConfiguration.setBoolean( RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_DISABLED, true) util.addFunction("split", new TableFunc1) val sqlQuery1 = """ |SELECT a, b - MOD(b, 300) AS b, c FROM MyTable |WHERE b >= UNIX_TIMESTAMP('${startTime}') """.stripMargin val table1 = util.tableEnv.sqlQuery(sqlQuery1) util.tableEnv.registerTable("table1", table1) val sqlQuery2 = "SELECT a, b, c1 AS c FROM table1, LATERAL TABLE(split(c)) AS T(c1) WHERE c <> '' " val table2 = util.tableEnv.sqlQuery(sqlQuery2) util.tableEnv.registerTable("table2", table2) val sqlQuery3 = "SELECT a, b, COUNT(DISTINCT c) AS total_c FROM table2 GROUP BY a, b" val table3 = util.tableEnv.sqlQuery(sqlQuery3) util.tableEnv.registerTable("table3", table3) val sqlQuery4 = "SELECT a, total_c FROM table3 UNION ALL SELECT a, 0 AS total_c FROM table1" val table4 = util.tableEnv.sqlQuery(sqlQuery4) util.tableEnv.registerTable("table4", table4) val sqlQuery5 = "SELECT * FROM table4 WHERE a > 50" val table5 = util.tableEnv.sqlQuery(sqlQuery5) val retractSink1 = util.createRetractTableSink(Array("a", "total_c"), Array(INT, LONG)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "retractSink1", retractSink1) stmtSet.addInsert("retractSink1", table5) val sqlQuery6 = "SELECT * FROM table4 WHERE a < 50" val table6 = util.tableEnv.sqlQuery(sqlQuery6) val retractSink2 = util.createRetractTableSink(Array("a", "total_c"), Array(INT, LONG)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "retractSink2", retractSink2) stmtSet.addInsert("retractSink2", table6) util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE) } @Test def testMultiSinksSplitOnUnion1(): Unit = { val stmtSet = util.tableEnv.createStatementSet() util.tableEnv.getConfig.getConfiguration.setBoolean( RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_DISABLED, true) val table = util.tableEnv.sqlQuery( "SELECT a, c FROM MyTable UNION ALL SELECT d, f FROM MyTable1") util.tableEnv.registerTable("TempTable", table) val table1 = util.tableEnv.sqlQuery("SELECT SUM(a) AS total_sum FROM TempTable") val upsertSink = util.createUpsertTableSink(Array(), Array("total_sum"), Array(INT)) 
util.tableEnv.asInstanceOf[TableEnvironmentInternal] .registerTableSinkInternal("upsertSink", upsertSink) stmtSet.addInsert("upsertSink", table1) val table3 = util.tableEnv.sqlQuery("SELECT MIN(a) AS total_min FROM TempTable") val retractSink = util.createRetractTableSink(Array("total_min"), Array(INT)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "retractSink", retractSink) stmtSet.addInsert("retractSink", table3) util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE) } @Test def testMultiSinksSplitOnUnion2(): Unit = { val stmtSet = util.tableEnv.createStatementSet() util.tableEnv.getConfig.getConfiguration.setBoolean( RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_REUSE_OPTIMIZE_BLOCK_WITH_DIGEST_ENABLED, true) util.tableEnv.getConfig.getConfiguration.setBoolean( RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_DISABLED, true) util.addTableSource[(Int, Long, String)]("MyTable2", 'a, 'b, 'c) val sqlQuery1 = """ |SELECT a, c FROM MyTable |UNION ALL |SELECT d, f FROM MyTable1 |UNION ALL |SELECT a, c FROM MyTable2 """.stripMargin val table = util.tableEnv.sqlQuery(sqlQuery1) util.tableEnv.registerTable("TempTable", table) val table1 = util.tableEnv.sqlQuery("SELECT SUM(a) AS total_sum FROM TempTable") val retractSink1 = util.createRetractTableSink(Array("total_sum"), Array(INT)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "retractSink1", retractSink1) stmtSet.addInsert("retractSink1", table1) val table2 = util.tableEnv.sqlQuery("SELECT MIN(a) AS total_min FROM TempTable") val retractSink2 = util.createRetractTableSink(Array("total_min"), Array(INT)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "retractSink2", retractSink2) stmtSet.addInsert("retractSink2", table2) val sqlQuery2 = "SELECT a FROM (SELECT a, c FROM MyTable UNION ALL SELECT d, f FROM MyTable1)" val table3 = util.tableEnv.sqlQuery(sqlQuery2) val appendSink3 = util.createAppendTableSink(Array("a"), Array(INT)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "appendSink3", appendSink3) stmtSet.addInsert("appendSink3", table3) util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE) } @Test def testMultiSinksSplitOnUnion3(): Unit = { val stmtSet = util.tableEnv.createStatementSet() util.tableEnv.getConfig.getConfiguration.setBoolean( RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_DISABLED, true) util.addTableSource[(Int, Long, String)]("MyTable2", 'a, 'b, 'c) val sqlQuery1 = "SELECT a, c FROM MyTable UNION ALL SELECT d, f FROM MyTable1" val table = util.tableEnv.sqlQuery(sqlQuery1) util.tableEnv.registerTable("TempTable", table) val appendSink = util.createAppendTableSink(Array("a", "c"), Array(INT, STRING)) util.tableEnv.asInstanceOf[TableEnvironmentInternal] .registerTableSinkInternal("appendSink", appendSink) stmtSet.addInsert("appendSink", table) val sqlQuery2 = "SELECT a, c FROM TempTable UNION ALL SELECT a, c FROM MyTable2" val table1 = util.tableEnv.sqlQuery(sqlQuery2) util.tableEnv.registerTable("TempTable1", table1) val table2 = util.tableEnv.sqlQuery("SELECT SUM(a) AS total_sum FROM TempTable1") val retractSink = util.createRetractTableSink(Array("total_sum"), Array(INT)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "retractSink", retractSink) stmtSet.addInsert("retractSink", table2) val table3 = util.tableEnv.sqlQuery("SELECT MIN(a) AS total_min FROM TempTable1") val upsertSink = util.createUpsertTableSink(Array(), 
Array("total_min"), Array(INT)) util.tableEnv.asInstanceOf[TableEnvironmentInternal] .registerTableSinkInternal("upsertSink", upsertSink) stmtSet.addInsert("upsertSink", table3) util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE) } @Test def testMultiSinksSplitOnUnion4(): Unit = { val stmtSet = util.tableEnv.createStatementSet() util.tableEnv.getConfig.getConfiguration.setBoolean( RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_DISABLED, true) util.addTableSource[(Int, Long, String)]("MyTable2", 'a, 'b, 'c) val sqlQuery = """ |SELECT a, c FROM MyTable |UNION ALL |SELECT d, f FROM MyTable1 |UNION ALL |SELECT a, c FROM MyTable2 """.stripMargin val table = util.tableEnv.sqlQuery(sqlQuery) util.tableEnv.registerTable("TempTable", table) val table1 = util.tableEnv.sqlQuery("SELECT SUM(a) AS total_sum FROM TempTable") val upsertSink = util.createUpsertTableSink(Array(), Array("total_sum"), Array(INT)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "upsertSink", upsertSink) stmtSet.addInsert("upsertSink", table1) val table2 = util.tableEnv.sqlQuery("SELECT MIN(a) AS total_min FROM TempTable") val retractSink = util.createRetractTableSink(Array("total_min"), Array(INT)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "retractSink", retractSink) stmtSet.addInsert("retractSink", table2) util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE) } @Test def testUnionAndAggWithDifferentGroupings(): Unit = { val sqlQuery = """ |SELECT b, c, SUM(a) AS a_sum FROM MyTable GROUP BY b, c |UNION ALL |SELECT 1 AS b, c, SUM(a) AS a_sum FROM MyTable GROUP BY c """.stripMargin val table = util.tableEnv.sqlQuery(sqlQuery) val upsertSink = util.createUpsertTableSink(Array(), Array("b", "c", "a_sum"), Array(LONG, STRING, INT)) util.verifyPlanInsert(table, upsertSink, "upsertSink", ExplainDetail.CHANGELOG_MODE) } @Test def testUpdateAsRetractConsumedAtSinkBlock(): Unit = { val stmtSet = util.tableEnv.createStatementSet() val table = util.tableEnv.sqlQuery("SELECT a, b, c FROM MyTable") util.tableEnv.registerTable("TempTable", table) val sqlQuery = s""" |SELECT * FROM ( | SELECT a, b, c, | ROW_NUMBER() OVER (PARTITION BY b ORDER BY c DESC) as rank_num | FROM TempTable) |WHERE rank_num <= 10 """.stripMargin val table1 = util.tableEnv.sqlQuery(sqlQuery) val retractSink = util.createRetractTableSink( Array("a", "b", "c", "rank_num"), Array(INT, LONG, STRING, LONG)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "retractSink", retractSink) stmtSet.addInsert("retractSink", table1) val upsertSink = util.createUpsertTableSink(Array(), Array("a", "b"), Array(INT, LONG)) val table2 = util.tableEnv.sqlQuery("SELECT a, b FROM TempTable WHERE a < 6") util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "upsertSink", upsertSink) stmtSet.addInsert("upsertSink", table2) util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE) } @Test def testUpdateAsRetractConsumedAtSourceBlock(): Unit = { val stmtSet = util.tableEnv.createStatementSet() val sqlQuery = s""" |SELECT * FROM ( | SELECT a, b, c, | ROW_NUMBER() OVER (PARTITION BY b ORDER BY c DESC) as rank_num | FROM MyTable) |WHERE rank_num <= 10 """.stripMargin val table = util.tableEnv.sqlQuery(sqlQuery) util.tableEnv.registerTable("TempTable", table) val table1 = util.tableEnv.sqlQuery("SELECT a FROM TempTable WHERE a > 6") val retractSink = util.createRetractTableSink(Array("a"), Array(INT)) 
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "retractSink", retractSink) stmtSet.addInsert("retractSink", table1) val table2 = util.tableEnv.sqlQuery("SELECT a, b FROM TempTable WHERE a < 6") val upsertSink = util.createUpsertTableSink(Array(), Array("a", "b"), Array(INT, LONG)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "upsertSink", upsertSink) stmtSet.addInsert("upsertSink", table2) util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE) } @Test def testMultiLevelViews(): Unit = { val stmtSet = util.tableEnv.createStatementSet() util.tableEnv.getConfig.getConfiguration.setBoolean( RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_DISABLED, true) val table1 = util.tableEnv.sqlQuery("SELECT a, b, c FROM MyTable WHERE c LIKE '%hello%'") util.tableEnv.registerTable("TempTable1", table1) val appendSink = util.createAppendTableSink(Array("a", "b", "c"), Array(INT, LONG, STRING)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "appendSink", appendSink) stmtSet.addInsert("appendSink", table1) val table2 = util.tableEnv.sqlQuery("SELECT a, b, c FROM MyTable WHERE c LIKE '%world%'") util.tableEnv.registerTable("TempTable2", table2) val sqlQuery = """ |SELECT b, COUNT(a) AS cnt FROM ( | (SELECT * FROM TempTable1) | UNION ALL | (SELECT * FROM TempTable2) |) t |GROUP BY b """.stripMargin val table3 = util.tableEnv.sqlQuery(sqlQuery) util.tableEnv.registerTable("TempTable3", table3) val table4 = util.tableEnv.sqlQuery("SELECT b, cnt FROM TempTable3 WHERE b < 4") val retractSink = util.createRetractTableSink(Array("b", "cnt"), Array(LONG, LONG)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "retractSink", retractSink) stmtSet.addInsert("retractSink", table4) val table5 = util.tableEnv.sqlQuery("SELECT b, cnt FROM TempTable3 WHERE b >=4 AND b < 6") val upsertSink = util.createUpsertTableSink(Array(), Array("b", "cnt"), Array(LONG, LONG)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "upsertSink", upsertSink) stmtSet.addInsert("upsertSink", table5) util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE) } @Test def testSharedUnionNode(): Unit = { val stmtSet = util.tableEnv.createStatementSet() util.tableEnv.getConfig.getConfiguration.setBoolean( RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_DISABLED, true) val table1 = util.tableEnv.sqlQuery("SELECT a, b, c FROM MyTable WHERE c LIKE '%hello%'") util.tableEnv.registerTable("TempTable1", table1) val appendSink = util.createAppendTableSink(Array("a", "b", "c"), Array(INT, LONG, STRING)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "appendSink", appendSink) stmtSet.addInsert("appendSink", table1) val table2 = util.tableEnv.sqlQuery("SELECT a, b, c FROM MyTable WHERE c LIKE '%world%'") util.tableEnv.registerTable("TempTable2", table2) val sqlQuery1 = """ |SELECT * FROM TempTable1 |UNION ALL |SELECT * FROM TempTable2 """.stripMargin val table3 = util.tableEnv.sqlQuery(sqlQuery1) util.tableEnv.registerTable("TempTable3", table3) val table4 = util.tableEnv.sqlQuery("SELECT * FROM TempTable3 WHERE b >= 5") val retractSink1 = util.createRetractTableSink(Array("a", "b", "c"), Array(INT, LONG, STRING)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "retractSink1", retractSink1) stmtSet.addInsert("retractSink1", table4) val table5 = util.tableEnv.sqlQuery("SELECT b, count(a) as cnt FROM TempTable3 
GROUP BY b") util.tableEnv.registerTable("TempTable4", table5) val table6 = util.tableEnv.sqlQuery("SELECT b, cnt FROM TempTable4 WHERE b < 4") val retractSink2 = util.createRetractTableSink(Array("b", "cnt"), Array(LONG, LONG)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "retractSink2", retractSink2) stmtSet.addInsert("retractSink2", table6) util.tableEnv.sqlQuery("SELECT b, cnt FROM TempTable4 WHERE b >=4 AND b < 6") val upsertSink = util.createUpsertTableSink(Array(), Array("b", "cnt"), Array(LONG, LONG)) util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal( "upsertSink", upsertSink) stmtSet.addInsert("upsertSink", table6) util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE) } }
repo_name: tzulitai/flink
path: flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/stream/sql/DagOptimizationTest.scala
language: Scala
license: apache-2.0
size: 27,665
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.log import java.io.File import java.nio._ import java.nio.file.{Files, Paths} import java.util.Properties import kafka.common._ import kafka.server.BrokerTopicStats import kafka.utils._ import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.record._ import org.apache.kafka.common.utils.Utils import org.junit.Assert._ import org.junit.{After, Test} import org.scalatest.junit.JUnitSuite import scala.collection.JavaConverters._ import scala.collection._ /** * Unit tests for the log cleaning logic */ class LogCleanerTest extends JUnitSuite { val tmpdir = TestUtils.tempDir() val dir = TestUtils.randomPartitionLogDir(tmpdir) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) logProps.put(LogConfig.SegmentIndexBytesProp, 1024: java.lang.Integer) logProps.put(LogConfig.CleanupPolicyProp, LogConfig.Compact) logProps.put(LogConfig.MessageTimestampDifferenceMaxMsProp, Long.MaxValue.toString) val logConfig = LogConfig(logProps) val time = new MockTime() val throttler = new Throttler(desiredRatePerSec = Double.MaxValue, checkIntervalMs = Long.MaxValue, time = time) @After def teardown(): Unit = { Utils.delete(tmpdir) } /** * Test simple log cleaning */ @Test def testCleanSegments(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // append messages to the log until we have four segments while(log.numberOfSegments < 4) log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) val keysFound = keysInLog(log) assertEquals(0L until log.logEndOffset, keysFound) // pretend we have the following keys val keys = immutable.ListSet(1, 3, 5, 7, 9) val map = new FakeOffsetMap(Int.MaxValue) keys.foreach(k => map.put(key(k), Long.MaxValue)) // clean the log val segments = log.logSegments.take(3).toSeq val stats = new CleanerStats() val expectedBytesRead = segments.map(_.size).sum cleaner.cleanSegments(log, segments, map, 0L, stats) val shouldRemain = keysInLog(log).filter(!keys.contains(_)) assertEquals(shouldRemain, keysInLog(log)) assertEquals(expectedBytesRead, stats.bytesRead) } @Test def testDuplicateCheckAfterCleaning(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 2048: java.lang.Integer) var log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val pid1 = 1 val pid2 = 2 val pid3 = 3 val pid4 = 4 appendIdempotentAsLeader(log, pid1, producerEpoch)(Seq(1, 2, 3)) appendIdempotentAsLeader(log, pid2, producerEpoch)(Seq(3, 1, 4)) 
appendIdempotentAsLeader(log, pid3, producerEpoch)(Seq(1, 4)) log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset)) assertEquals(List(2, 3, 3, 4, 1, 4), keysInLog(log)) assertEquals(List(1, 2, 3, 5, 6, 7), offsetsInLog(log)) // we have to reload the log to validate that the cleaner maintained sequence numbers correctly def reloadLog(): Unit = { log.close() log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps), recoveryPoint = 0L) } reloadLog() // check duplicate append from producer 1 var logAppendInfo = appendIdempotentAsLeader(log, pid1, producerEpoch)(Seq(1, 2, 3)) assertEquals(0L, logAppendInfo.firstOffset) assertEquals(2L, logAppendInfo.lastOffset) // check duplicate append from producer 3 logAppendInfo = appendIdempotentAsLeader(log, pid3, producerEpoch)(Seq(1, 4)) assertEquals(6L, logAppendInfo.firstOffset) assertEquals(7L, logAppendInfo.lastOffset) // check duplicate append from producer 2 logAppendInfo = appendIdempotentAsLeader(log, pid2, producerEpoch)(Seq(3, 1, 4)) assertEquals(3L, logAppendInfo.firstOffset) assertEquals(5L, logAppendInfo.lastOffset) // do one more append and a round of cleaning to force another deletion from producer 1's batch appendIdempotentAsLeader(log, pid4, producerEpoch)(Seq(2)) log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset)) assertEquals(List(3, 3, 4, 1, 4, 2), keysInLog(log)) assertEquals(List(2, 3, 5, 6, 7, 8), offsetsInLog(log)) reloadLog() // duplicate append from producer1 should still be fine logAppendInfo = appendIdempotentAsLeader(log, pid1, producerEpoch)(Seq(1, 2, 3)) assertEquals(0L, logAppendInfo.firstOffset) assertEquals(2L, logAppendInfo.lastOffset) } @Test def testBasicTransactionAwareCleaning(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 2048: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val pid1 = 1 val pid2 = 2 val appendProducer1 = appendTransactionalAsLeader(log, pid1, producerEpoch) val appendProducer2 = appendTransactionalAsLeader(log, pid2, producerEpoch) appendProducer1(Seq(1, 2)) appendProducer2(Seq(2, 3)) appendProducer1(Seq(3, 4)) log.appendAsLeader(abortMarker(pid1, producerEpoch), leaderEpoch = 0, isFromClient = false) log.appendAsLeader(commitMarker(pid2, producerEpoch), leaderEpoch = 0, isFromClient = false) appendProducer1(Seq(2)) log.appendAsLeader(commitMarker(pid1, producerEpoch), leaderEpoch = 0, isFromClient = false) val abortedTransactions = log.collectAbortedTransactions(log.logStartOffset, log.logEndOffset) log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset)) assertEquals(List(3, 2), keysInLog(log)) assertEquals(List(3, 6, 7, 8, 9), offsetsInLog(log)) // ensure the transaction index is still correct assertEquals(abortedTransactions, log.collectAbortedTransactions(log.logStartOffset, log.logEndOffset)) } @Test def testCleanWithTransactionsSpanningSegments(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val pid1 = 1 val pid2 = 2 val pid3 = 3 val appendProducer1 = appendTransactionalAsLeader(log, pid1, producerEpoch) val appendProducer2 = 
appendTransactionalAsLeader(log, pid2, producerEpoch) val appendProducer3 = appendTransactionalAsLeader(log, pid3, producerEpoch) appendProducer1(Seq(1, 2)) appendProducer3(Seq(2, 3)) appendProducer2(Seq(3, 4)) log.roll() appendProducer2(Seq(5, 6)) appendProducer3(Seq(6, 7)) appendProducer1(Seq(7, 8)) log.appendAsLeader(abortMarker(pid2, producerEpoch), leaderEpoch = 0, isFromClient = false) appendProducer3(Seq(8, 9)) log.appendAsLeader(commitMarker(pid3, producerEpoch), leaderEpoch = 0, isFromClient = false) appendProducer1(Seq(9, 10)) log.appendAsLeader(abortMarker(pid1, producerEpoch), leaderEpoch = 0, isFromClient = false) // we have only cleaned the records in the first segment val dirtyOffset = cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset))._1 assertEquals(List(2, 3, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10), keysInLog(log)) log.roll() // append a couple extra segments in the new segment to ensure we have sequence numbers appendProducer2(Seq(11)) appendProducer1(Seq(12)) // finally only the keys from pid3 should remain cleaner.clean(LogToClean(new TopicPartition("test", 0), log, dirtyOffset, log.activeSegment.baseOffset)) assertEquals(List(2, 3, 6, 7, 8, 9, 11, 12), keysInLog(log)) } @Test def testCommitMarkerRemoval(): Unit = { val tp = new TopicPartition("test", 0) val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 256: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val producerId = 1L val appendProducer = appendTransactionalAsLeader(log, producerId, producerEpoch) appendProducer(Seq(1)) appendProducer(Seq(2, 3)) log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, isFromClient = false) appendProducer(Seq(2)) log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, isFromClient = false) log.roll() // cannot remove the marker in this pass because there are still valid records var dirtyOffset = cleaner.doClean(LogToClean(tp, log, 0L, 100L), deleteHorizonMs = Long.MaxValue)._1 assertEquals(List(1, 3, 2), keysInLog(log)) assertEquals(List(0, 2, 3, 4, 5), offsetsInLog(log)) appendProducer(Seq(1, 3)) log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, isFromClient = false) log.roll() // the first cleaning preserves the commit marker (at offset 3) since there were still records for the transaction dirtyOffset = cleaner.doClean(LogToClean(tp, log, dirtyOffset, 100L), deleteHorizonMs = Long.MaxValue)._1 assertEquals(List(2, 1, 3), keysInLog(log)) assertEquals(List(3, 4, 5, 6, 7, 8), offsetsInLog(log)) // delete horizon forced to 0 to verify marker is not removed early dirtyOffset = cleaner.doClean(LogToClean(tp, log, dirtyOffset, 100L), deleteHorizonMs = 0L)._1 assertEquals(List(2, 1, 3), keysInLog(log)) assertEquals(List(3, 4, 5, 6, 7, 8), offsetsInLog(log)) // clean again with large delete horizon and verify the marker is removed cleaner.doClean(LogToClean(tp, log, dirtyOffset, 100L), deleteHorizonMs = Long.MaxValue) assertEquals(List(2, 1, 3), keysInLog(log)) assertEquals(List(4, 5, 6, 7, 8), offsetsInLog(log)) } @Test def testAbortMarkerRemoval(): Unit = { val tp = new TopicPartition("test", 0) val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 256: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val 
producerId = 1L val appendProducer = appendTransactionalAsLeader(log, producerId, producerEpoch) appendProducer(Seq(1)) appendProducer(Seq(2, 3)) log.appendAsLeader(abortMarker(producerId, producerEpoch), leaderEpoch = 0, isFromClient = false) appendProducer(Seq(3)) log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, isFromClient = false) log.roll() // delete horizon set to 0 to verify marker is not removed early val dirtyOffset = cleaner.doClean(LogToClean(tp, log, 0L, 100L), deleteHorizonMs = 0L)._1 assertEquals(List(3), keysInLog(log)) assertEquals(List(3, 4, 5), offsetsInLog(log)) // clean again with large delete horizon and verify the marker is removed cleaner.doClean(LogToClean(tp, log, dirtyOffset, 100L), deleteHorizonMs = Long.MaxValue) assertEquals(List(3), keysInLog(log)) assertEquals(List(4, 5), offsetsInLog(log)) } /** * Test log cleaning with logs containing messages larger than default message size */ @Test def testLargeMessage() { val largeMessageSize = 1024 * 1024 // Create cleaner with very small default max message size val cleaner = makeCleaner(Int.MaxValue, maxMessageSize=1024) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, largeMessageSize * 16: java.lang.Integer) logProps.put(LogConfig.MaxMessageBytesProp, largeMessageSize * 2: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) while(log.numberOfSegments < 2) log.appendAsLeader(record(log.logEndOffset.toInt, Array.fill(largeMessageSize)(0: Byte)), leaderEpoch = 0) val keysFound = keysInLog(log) assertEquals(0L until log.logEndOffset, keysFound) // pretend we have the following keys val keys = immutable.ListSet(1, 3, 5, 7, 9) val map = new FakeOffsetMap(Int.MaxValue) keys.foreach(k => map.put(key(k), Long.MaxValue)) // clean the log val stats = new CleanerStats() cleaner.cleanSegments(log, Seq(log.logSegments.head), map, 0L, stats) val shouldRemain = keysInLog(log).filter(!keys.contains(_)) assertEquals(shouldRemain, keysInLog(log)) } @Test def testCleaningWithDeletes(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // append messages with the keys 0 through N while(log.numberOfSegments < 2) log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) // delete all even keys between 0 and N val leo = log.logEndOffset for(key <- 0 until leo.toInt by 2) log.appendAsLeader(tombstoneRecord(key), leaderEpoch = 0) // append some new unique keys to pad out to a new active segment while(log.numberOfSegments < 4) log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0, log.activeSegment.baseOffset)) val keys = keysInLog(log).toSet assertTrue("None of the keys we deleted should still exist.", (0 until leo.toInt by 2).forall(!keys.contains(_))) } def testLogCleanerStats(): Unit = { // because loadFactor is 0.75, this means we can fit 2 messages in the map val cleaner = makeCleaner(2) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 0 log.appendAsLeader(record(1,1), leaderEpoch = 0) // offset 1 log.appendAsLeader(record(0,0), leaderEpoch = 0) // 
offset 2 log.appendAsLeader(record(1,1), leaderEpoch = 0) // offset 3 log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 4 // roll the segment, so we can clean the messages already appended log.roll() val initialLogSize = log.size val (endOffset, stats) = cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 2, log.activeSegment.baseOffset)) assertEquals(5, endOffset) assertEquals(5, stats.messagesRead) assertEquals(initialLogSize, stats.bytesRead) assertEquals(2, stats.messagesWritten) assertEquals(log.size, stats.bytesWritten) assertEquals(0, stats.invalidMessagesRead) assertTrue(stats.endTime >= stats.startTime) } @Test def testLogCleanerRetainsProducerLastSequence(): Unit = { val cleaner = makeCleaner(10) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) log.appendAsLeader(record(0, 0), leaderEpoch = 0) // offset 0 log.appendAsLeader(record(0, 1, producerId = 1, producerEpoch = 0, sequence = 0), leaderEpoch = 0) // offset 1 log.appendAsLeader(record(0, 2, producerId = 2, producerEpoch = 0, sequence = 0), leaderEpoch = 0) // offset 2 log.appendAsLeader(record(0, 3, producerId = 3, producerEpoch = 0, sequence = 0), leaderEpoch = 0) // offset 3 log.appendAsLeader(record(1, 1, producerId = 2, producerEpoch = 0, sequence = 1), leaderEpoch = 0) // offset 4 // roll the segment, so we can clean the messages already appended log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset)) assertEquals(List(0, 0, 1), keysInLog(log)) assertEquals(List(1, 3, 4), offsetsInLog(log)) } @Test def testLogCleanerRetainsLastSequenceEvenIfTransactionAborted(): Unit = { val cleaner = makeCleaner(10) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val producerId = 1L val appendProducer = appendTransactionalAsLeader(log, producerId, producerEpoch) appendProducer(Seq(1)) appendProducer(Seq(2, 3)) log.appendAsLeader(abortMarker(producerId, producerEpoch), leaderEpoch = 0, isFromClient = false) log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset)) assertEquals(List(3), keysInLog(log)) assertEquals(List(2, 3), offsetsInLog(log)) } @Test def testPartialSegmentClean(): Unit = { // because loadFactor is 0.75, this means we can fit 2 messages in the map val cleaner = makeCleaner(2) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 0 log.appendAsLeader(record(1,1), leaderEpoch = 0) // offset 1 log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 2 log.appendAsLeader(record(1,1), leaderEpoch = 0) // offset 3 log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 4 // roll the segment, so we can clean the messages already appended log.roll() // clean the log with only one message removed cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 2, log.activeSegment.baseOffset)) assertEquals(List(1,0,1,0), keysInLog(log)) assertEquals(List(1,2,3,4), offsetsInLog(log)) // continue to make progress, even though we can only clean one message at a time cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 3, 
log.activeSegment.baseOffset)) assertEquals(List(0,1,0), keysInLog(log)) assertEquals(List(2,3,4), offsetsInLog(log)) cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 4, log.activeSegment.baseOffset)) assertEquals(List(1,0), keysInLog(log)) assertEquals(List(3,4), offsetsInLog(log)) } @Test def testCleaningWithUncleanableSection(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // Number of distinct keys. For an effective test this should be small enough such that each log segment contains some duplicates. val N = 10 val numCleanableSegments = 2 val numTotalSegments = 7 // append messages with the keys 0 through N-1, values equal offset while(log.numberOfSegments <= numCleanableSegments) log.appendAsLeader(record(log.logEndOffset.toInt % N, log.logEndOffset.toInt), leaderEpoch = 0) // at this point one message past the cleanable segments has been added // the entire segment containing the first uncleanable offset should not be cleaned. val firstUncleanableOffset = log.logEndOffset + 1 // +1 so it is past the baseOffset while(log.numberOfSegments < numTotalSegments - 1) log.appendAsLeader(record(log.logEndOffset.toInt % N, log.logEndOffset.toInt), leaderEpoch = 0) // the last (active) segment has just one message def distinctValuesBySegment = log.logSegments.map(s => s.log.records.asScala.map(record => TestUtils.readString(record.value)).toSet.size).toSeq val disctinctValuesBySegmentBeforeClean = distinctValuesBySegment assertTrue("Test is not effective unless each segment contains duplicates. Increase segment size or decrease number of keys.", distinctValuesBySegment.reverse.tail.forall(_ > N)) cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0, firstUncleanableOffset)) val distinctValuesBySegmentAfterClean = distinctValuesBySegment assertTrue("The cleanable segments should have fewer number of values after cleaning", disctinctValuesBySegmentBeforeClean.zip(distinctValuesBySegmentAfterClean).take(numCleanableSegments).forall { case (before, after) => after < before }) assertTrue("The uncleanable segments should have the same number of values after cleaning", disctinctValuesBySegmentBeforeClean.zip(distinctValuesBySegmentAfterClean) .slice(numCleanableSegments, numTotalSegments).forall { x => x._1 == x._2 }) } @Test def testLogToClean(): Unit = { // create a log with small segment size val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 100: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // create 6 segments with only one message in each segment def createRecorcs = TestUtils.singletonRecords(value = Array.fill[Byte](25)(0), key = 1.toString.getBytes) for (_ <- 0 until 6) log.appendAsLeader(createRecorcs, leaderEpoch = 0) val logToClean = LogToClean(new TopicPartition("test", 0), log, log.activeSegment.baseOffset, log.activeSegment.baseOffset) assertEquals("Total bytes of LogToClean should equal size of all segments excluding the active segment", logToClean.totalBytes, log.size - log.activeSegment.size) } @Test def testLogToCleanWithUncleanableSection(): Unit = { // create a log with small segment size val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 100: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // create 6 segments with only one message in 
each segment def createRecords = TestUtils.singletonRecords(value = Array.fill[Byte](25)(0), key = 1.toString.getBytes) for (_ <- 0 until 6) log.appendAsLeader(createRecords, leaderEpoch = 0) // segments [0,1] are clean; segments [2, 3] are cleanable; segments [4,5] are uncleanable val segs = log.logSegments.toSeq val logToClean = LogToClean(new TopicPartition("test", 0), log, segs(2).baseOffset, segs(4).baseOffset) val expectedCleanSize = segs.take(2).map(_.size).sum val expectedCleanableSize = segs.slice(2, 4).map(_.size).sum assertEquals("Uncleanable bytes of LogToClean should equal size of all segments prior the one containing first dirty", logToClean.cleanBytes, expectedCleanSize) assertEquals("Cleanable bytes of LogToClean should equal size of all segments from the one containing first dirty offset" + " to the segment prior to the one with the first uncleanable offset", logToClean.cleanableBytes, expectedCleanableSize) assertEquals("Total bytes should be the sum of the clean and cleanable segments", logToClean.totalBytes, expectedCleanSize + expectedCleanableSize) assertEquals("Total cleanable ratio should be the ratio of cleanable size to clean plus cleanable", logToClean.cleanableRatio, expectedCleanableSize / (expectedCleanSize + expectedCleanableSize).toDouble, 1.0e-6d) } @Test def testCleaningWithUnkeyedMessages(): Unit = { val cleaner = makeCleaner(Int.MaxValue) // create a log with compaction turned off so we can append unkeyed messages val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) logProps.put(LogConfig.CleanupPolicyProp, LogConfig.Delete) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // append unkeyed messages while(log.numberOfSegments < 2) log.appendAsLeader(unkeyedRecord(log.logEndOffset.toInt), leaderEpoch = 0) val numInvalidMessages = unkeyedMessageCountInLog(log) val sizeWithUnkeyedMessages = log.size // append keyed messages while(log.numberOfSegments < 3) log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) val expectedSizeAfterCleaning = log.size - sizeWithUnkeyedMessages val (_, stats) = cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0, log.activeSegment.baseOffset)) assertEquals("Log should only contain keyed messages after cleaning.", 0, unkeyedMessageCountInLog(log)) assertEquals("Log should only contain keyed messages after cleaning.", expectedSizeAfterCleaning, log.size) assertEquals("Cleaner should have seen %d invalid messages.", numInvalidMessages, stats.invalidMessagesRead) } /* extract all the keys from a log */ def keysInLog(log: Log): Iterable[Int] = { for (segment <- log.logSegments; batch <- segment.log.batches.asScala if !batch.isControlBatch; record <- batch.asScala if record.hasValue && record.hasKey) yield TestUtils.readString(record.key).toInt } /* extract all the offsets from a log */ def offsetsInLog(log: Log): Iterable[Long] = log.logSegments.flatMap(s => s.log.records.asScala.filter(_.hasValue).filter(_.hasKey).map(m => m.offset)) def unkeyedMessageCountInLog(log: Log) = log.logSegments.map(s => s.log.records.asScala.filter(_.hasValue).count(m => !m.hasKey)).sum def abortCheckDone(topicPartition: TopicPartition): Unit = { throw new LogCleaningAbortedException() } /** * Test that abortion during cleaning throws a LogCleaningAbortedException */ @Test def testCleanSegmentsWithAbort(): Unit = { val cleaner = makeCleaner(Int.MaxValue, abortCheckDone) val logProps = new Properties() 
logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // append messages to the log until we have four segments while(log.numberOfSegments < 4) log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) val keys = keysInLog(log) val map = new FakeOffsetMap(Int.MaxValue) keys.foreach(k => map.put(key(k), Long.MaxValue)) intercept[LogCleaningAbortedException] { cleaner.cleanSegments(log, log.logSegments.take(3).toSeq, map, 0L, new CleanerStats()) } } /** * Validate the logic for grouping log segments together for cleaning */ @Test def testSegmentGrouping(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 300: java.lang.Integer) logProps.put(LogConfig.IndexIntervalBytesProp, 1: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // append some messages to the log var i = 0 while(log.numberOfSegments < 10) { log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), leaderEpoch = 0) i += 1 } // grouping by very large values should result in a single group with all the segments in it var groups = cleaner.groupSegmentsBySize(log.logSegments, maxSize = Int.MaxValue, maxIndexSize = Int.MaxValue, log.logEndOffset) assertEquals(1, groups.size) assertEquals(log.numberOfSegments, groups.head.size) checkSegmentOrder(groups) // grouping by very small values should result in all groups having one entry groups = cleaner.groupSegmentsBySize(log.logSegments, maxSize = 1, maxIndexSize = Int.MaxValue, log.logEndOffset) assertEquals(log.numberOfSegments, groups.size) assertTrue("All groups should be singletons.", groups.forall(_.size == 1)) checkSegmentOrder(groups) groups = cleaner.groupSegmentsBySize(log.logSegments, maxSize = Int.MaxValue, maxIndexSize = 1, log.logEndOffset) assertEquals(log.numberOfSegments, groups.size) assertTrue("All groups should be singletons.", groups.forall(_.size == 1)) checkSegmentOrder(groups) val groupSize = 3 // check grouping by log size val logSize = log.logSegments.take(groupSize).map(_.size).sum.toInt + 1 groups = cleaner.groupSegmentsBySize(log.logSegments, maxSize = logSize, maxIndexSize = Int.MaxValue, log.logEndOffset) checkSegmentOrder(groups) assertTrue("All but the last group should be the target size.", groups.dropRight(1).forall(_.size == groupSize)) // check grouping by index size val indexSize = log.logSegments.take(groupSize).map(_.index.sizeInBytes).sum + 1 groups = cleaner.groupSegmentsBySize(log.logSegments, maxSize = Int.MaxValue, maxIndexSize = indexSize, log.logEndOffset) checkSegmentOrder(groups) assertTrue("All but the last group should be the target size.", groups.dropRight(1).forall(_.size == groupSize)) } /** * Validate the logic for grouping log segments together for cleaning when only a small number of * messages are retained, but the range of offsets is greater than Int.MaxValue. A group should not * contain a range of offsets greater than Int.MaxValue to ensure that relative offsets can be * stored in 4 bytes. 
*/ @Test def testSegmentGroupingWithSparseOffsets(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 400: java.lang.Integer) logProps.put(LogConfig.IndexIntervalBytesProp, 1: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // fill up first segment while (log.numberOfSegments == 1) log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), leaderEpoch = 0) // forward offset and append message to next segment at offset Int.MaxValue val records = messageWithOffset("hello".getBytes, "hello".getBytes, Int.MaxValue - 1) log.appendAsFollower(records) log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), leaderEpoch = 0) assertEquals(Int.MaxValue, log.activeSegment.index.lastOffset) // grouping should result in a single group with maximum relative offset of Int.MaxValue var groups = cleaner.groupSegmentsBySize(log.logSegments, maxSize = Int.MaxValue, maxIndexSize = Int.MaxValue, log.logEndOffset) assertEquals(1, groups.size) // append another message, making last offset of second segment > Int.MaxValue log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), leaderEpoch = 0) // grouping should not group the two segments to ensure that maximum relative offset in each group <= Int.MaxValue groups = cleaner.groupSegmentsBySize(log.logSegments, maxSize = Int.MaxValue, maxIndexSize = Int.MaxValue, log.logEndOffset) assertEquals(2, groups.size) checkSegmentOrder(groups) // append more messages, creating new segments, further grouping should still occur while (log.numberOfSegments < 4) log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), leaderEpoch = 0) groups = cleaner.groupSegmentsBySize(log.logSegments, maxSize = Int.MaxValue, maxIndexSize = Int.MaxValue, log.logEndOffset) assertEquals(log.numberOfSegments - 1, groups.size) for (group <- groups) assertTrue("Relative offset greater than Int.MaxValue", group.last.index.lastOffset - group.head.index.baseOffset <= Int.MaxValue) checkSegmentOrder(groups) } /** * Following the loading of a log segment where the index file is zero sized, * the index returned would be the base offset. Sometimes the log file would * contain data with offsets in excess of the baseOffset which would cause * the log cleaner to group together segments with a range of > Int.MaxValue * this test replicates that scenario to ensure that the segments are grouped * correctly. 
*/ @Test def testSegmentGroupingFollowingLoadOfZeroIndex(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 400: java.lang.Integer) //mimic the effect of loading an empty index file logProps.put(LogConfig.IndexIntervalBytesProp, 400: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val record1 = messageWithOffset("hello".getBytes, "hello".getBytes, 0) log.appendAsFollower(record1) val record2 = messageWithOffset("hello".getBytes, "hello".getBytes, 1) log.appendAsFollower(record2) log.roll(Int.MaxValue/2) // starting a new log segment at offset Int.MaxValue/2 val record3 = messageWithOffset("hello".getBytes, "hello".getBytes, Int.MaxValue/2) log.appendAsFollower(record3) val record4 = messageWithOffset("hello".getBytes, "hello".getBytes, Int.MaxValue.toLong + 1) log.appendAsFollower(record4) assertTrue("Actual offset range should be > Int.MaxValue", log.logEndOffset - 1 - log.logStartOffset > Int.MaxValue) assertTrue("index.lastOffset is reporting the wrong last offset", log.logSegments.last.index.lastOffset - log.logStartOffset <= Int.MaxValue) // grouping should result in two groups because the second segment takes the offset range > MaxInt val groups = cleaner.groupSegmentsBySize(log.logSegments, maxSize = Int.MaxValue, maxIndexSize = Int.MaxValue, log.logEndOffset) assertEquals(2, groups.size) for (group <- groups) assertTrue("Relative offset greater than Int.MaxValue", group.last.nextOffset() - 1 - group.head.baseOffset <= Int.MaxValue) checkSegmentOrder(groups) } private def checkSegmentOrder(groups: Seq[Seq[LogSegment]]): Unit = { val offsets = groups.flatMap(_.map(_.baseOffset)) assertEquals("Offsets should be in increasing order.", offsets.sorted, offsets) } /** * Test building an offset map off the log */ @Test def testBuildOffsetMap(): Unit = { val map = new FakeOffsetMap(1000) val log = makeLog() val cleaner = makeCleaner(Int.MaxValue) val start = 0 val end = 500 writeToLog(log, (start until end) zip (start until end)) def checkRange(map: FakeOffsetMap, start: Int, end: Int) { val stats = new CleanerStats() cleaner.buildOffsetMap(log, start, end, map, stats) val endOffset = map.latestOffset + 1 assertEquals("Last offset should be the end offset.", end, endOffset) assertEquals("Should have the expected number of messages in the map.", end-start, map.size) for(i <- start until end) assertEquals("Should find all the keys", i.toLong, map.get(key(i))) assertEquals("Should not find a value too small", -1L, map.get(key(start - 1))) assertEquals("Should not find a value too large", -1L, map.get(key(end))) assertEquals(end - start, stats.mapMessagesRead) } val segments = log.logSegments.toSeq checkRange(map, 0, segments(1).baseOffset.toInt) checkRange(map, segments(1).baseOffset.toInt, segments(3).baseOffset.toInt) checkRange(map, segments(3).baseOffset.toInt, log.logEndOffset.toInt) } /** * Tests recovery if broker crashes at the following stages during the cleaning sequence * <ol> * <li> Cleaner has created .cleaned log containing multiple segments, swap sequence not yet started * <li> .cleaned log renamed to .swap, old segment files not yet renamed to .deleted * <li> .cleaned log renamed to .swap, old segment files renamed to .deleted, but not yet deleted * <li> .swap suffix removed, completing the swap, but async delete of .deleted files not yet complete * </ol> */ @Test def testRecoveryAfterCrash(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = 
new Properties() logProps.put(LogConfig.SegmentBytesProp, 300: java.lang.Integer) logProps.put(LogConfig.IndexIntervalBytesProp, 1: java.lang.Integer) logProps.put(LogConfig.FileDeleteDelayMsProp, 10: java.lang.Integer) val config = LogConfig.fromProps(logConfig.originals, logProps) def recoverAndCheck(config: LogConfig, expectedKeys : Iterable[Int]) : Log = { // Recover log file and check that after recovery, keys are as expected // and all temporary files have been deleted val recoveredLog = makeLog(config = config) time.sleep(config.fileDeleteDelayMs + 1) for (file <- dir.listFiles) { assertFalse("Unexpected .deleted file after recovery", file.getName.endsWith(Log.DeletedFileSuffix)) assertFalse("Unexpected .cleaned file after recovery", file.getName.endsWith(Log.CleanedFileSuffix)) assertFalse("Unexpected .swap file after recovery", file.getName.endsWith(Log.SwapFileSuffix)) } assertEquals(expectedKeys, keysInLog(recoveredLog)) recoveredLog } // create a log and append some messages var log = makeLog(config = config) var messageCount = 0 while(log.numberOfSegments < 10) { log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) messageCount += 1 } val allKeys = keysInLog(log) // pretend we have odd-numbered keys val offsetMap = new FakeOffsetMap(Int.MaxValue) for (k <- 1 until messageCount by 2) offsetMap.put(key(k), Long.MaxValue) // clean the log cleaner.cleanSegments(log, log.logSegments.take(9).toSeq, offsetMap, 0L, new CleanerStats()) var cleanedKeys = keysInLog(log) // 1) Simulate recovery just after .cleaned file is created, before rename to .swap // On recovery, clean operation is aborted. All messages should be present in the log log.logSegments.head.changeFileSuffixes("", Log.CleanedFileSuffix) for (file <- dir.listFiles if file.getName.endsWith(Log.DeletedFileSuffix)) { Utils.atomicMoveWithFallback(file.toPath, Paths.get(CoreUtils.replaceSuffix(file.getPath, Log.DeletedFileSuffix, ""))) } log = recoverAndCheck(config, allKeys) // clean again cleaner.cleanSegments(log, log.logSegments.take(9).toSeq, offsetMap, 0L, new CleanerStats()) cleanedKeys = keysInLog(log) // 2) Simulate recovery just after swap file is created, before old segment files are // renamed to .deleted. Clean operation is resumed during recovery. log.logSegments.head.changeFileSuffixes("", Log.SwapFileSuffix) for (file <- dir.listFiles if file.getName.endsWith(Log.DeletedFileSuffix)) { Utils.atomicMoveWithFallback(file.toPath, Paths.get(CoreUtils.replaceSuffix(file.getPath, Log.DeletedFileSuffix, ""))) } log = recoverAndCheck(config, cleanedKeys) // add some more messages and clean the log again while(log.numberOfSegments < 10) { log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) messageCount += 1 } for (k <- 1 until messageCount by 2) offsetMap.put(key(k), Long.MaxValue) cleaner.cleanSegments(log, log.logSegments.take(9).toSeq, offsetMap, 0L, new CleanerStats()) cleanedKeys = keysInLog(log) // 3) Simulate recovery after swap file is created and old segments files are renamed // to .deleted. Clean operation is resumed during recovery. 
log.logSegments.head.changeFileSuffixes("", Log.SwapFileSuffix) log = recoverAndCheck(config, cleanedKeys) // add some more messages and clean the log again while(log.numberOfSegments < 10) { log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) messageCount += 1 } for (k <- 1 until messageCount by 2) offsetMap.put(key(k), Long.MaxValue) cleaner.cleanSegments(log, log.logSegments.take(9).toSeq, offsetMap, 0L, new CleanerStats()) cleanedKeys = keysInLog(log) // 4) Simulate recovery after swap is complete, but async deletion // is not yet complete. Clean operation is resumed during recovery. recoverAndCheck(config, cleanedKeys) } @Test def testBuildOffsetMapFakeLarge(): Unit = { val map = new FakeOffsetMap(1000) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 120: java.lang.Integer) logProps.put(LogConfig.SegmentIndexBytesProp, 120: java.lang.Integer) logProps.put(LogConfig.CleanupPolicyProp, LogConfig.Compact) val logConfig = LogConfig(logProps) val log = makeLog(config = logConfig) val cleaner = makeCleaner(Int.MaxValue) val start = 0 val end = 2 val offsetSeq = Seq(0L, 7206178L) writeToLog(log, (start until end) zip (start until end), offsetSeq) cleaner.buildOffsetMap(log, start, end, map, new CleanerStats()) val endOffset = map.latestOffset assertEquals("Last offset should be the end offset.", 7206178L, endOffset) assertEquals("Should have the expected number of messages in the map.", end - start, map.size) assertEquals("Map should contain first value", 0L, map.get(key(0))) assertEquals("Map should contain second value", 7206178L, map.get(key(1))) } /** * Test building a partial offset map of part of a log segment */ @Test def testBuildPartialOffsetMap(): Unit = { // because loadFactor is 0.75, this means we can fit 2 messages in the map val map = new FakeOffsetMap(3) val log = makeLog() val cleaner = makeCleaner(2) log.appendAsLeader(record(0,0), leaderEpoch = 0) log.appendAsLeader(record(1,1), leaderEpoch = 0) log.appendAsLeader(record(2,2), leaderEpoch = 0) log.appendAsLeader(record(3,3), leaderEpoch = 0) log.appendAsLeader(record(4,4), leaderEpoch = 0) log.roll() val stats = new CleanerStats() cleaner.buildOffsetMap(log, 2, Int.MaxValue, map, stats) assertEquals(2, map.size) assertEquals(-1, map.get(key(0))) assertEquals(2, map.get(key(2))) assertEquals(3, map.get(key(3))) assertEquals(-1, map.get(key(4))) assertEquals(4, stats.mapMessagesRead) } /** * This test verifies that messages corrupted by KAFKA-4298 are fixed by the cleaner */ @Test def testCleanCorruptMessageSet() { val codec = CompressionType.GZIP val logProps = new Properties() logProps.put(LogConfig.CompressionTypeProp, codec.name) val logConfig = LogConfig(logProps) val log = makeLog(config = logConfig) val cleaner = makeCleaner(10) // messages are constructed so that the payload matches the expecting offset to // make offset validation easier after cleaning // one compressed log entry with duplicates val dupSetKeys = (0 until 2) ++ (0 until 2) val dupSetOffset = 25 val dupSet = dupSetKeys zip (dupSetOffset until dupSetOffset + dupSetKeys.size) // and one without (should still be fixed by the cleaner) val noDupSetKeys = 3 until 5 val noDupSetOffset = 50 val noDupSet = noDupSetKeys zip (noDupSetOffset until noDupSetOffset + noDupSetKeys.size) log.appendAsFollower(invalidCleanedMessage(dupSetOffset, dupSet, codec)) log.appendAsFollower(invalidCleanedMessage(noDupSetOffset, noDupSet, codec)) log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), 
log, 0, log.activeSegment.baseOffset)) for (segment <- log.logSegments; batch <- segment.log.batches.asScala; record <- batch.asScala) { assertTrue(record.hasMagic(batch.magic)) val value = TestUtils.readString(record.value).toLong assertEquals(record.offset, value) } } /** * Verify that the client can handle corrupted messages. Located here for now since the client * does not support writing messages with the old magic. */ @Test def testClientHandlingOfCorruptMessageSet(): Unit = { import JavaConverters._ val keys = 1 until 10 val offset = 50 val set = keys zip (offset until offset + keys.size) val corruptedMessage = invalidCleanedMessage(offset, set) val records = MemoryRecords.readableRecords(corruptedMessage.buffer) for (logEntry <- records.records.asScala) { val offset = logEntry.offset val value = TestUtils.readString(logEntry.value).toLong assertEquals(offset, value) } } @Test def testCleanTombstone(): Unit = { val logConfig = LogConfig(new Properties()) val log = makeLog(config = logConfig) val cleaner = makeCleaner(10) // Append a message with a large timestamp. log.appendAsLeader(TestUtils.singletonRecords(value = "0".getBytes, key = "0".getBytes, timestamp = time.milliseconds() + logConfig.deleteRetentionMs + 10000), leaderEpoch = 0) log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0, log.activeSegment.baseOffset)) // Append a tombstone with a small timestamp and roll out a new log segment. log.appendAsLeader(TestUtils.singletonRecords(value = null, key = "0".getBytes, timestamp = time.milliseconds() - logConfig.deleteRetentionMs - 10000), leaderEpoch = 0) log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 1, log.activeSegment.baseOffset)) assertEquals("The tombstone should be retained.", 1, log.logSegments.head.log.batches.iterator.next().lastOffset) // Append a message and roll out another log segment. 
log.appendAsLeader(TestUtils.singletonRecords(value = "1".getBytes, key = "1".getBytes, timestamp = time.milliseconds()), leaderEpoch = 0) log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 2, log.activeSegment.baseOffset)) assertEquals("The tombstone should be retained.", 1, log.logSegments.head.log.batches.iterator.next().lastOffset) } private def writeToLog(log: Log, keysAndValues: Iterable[(Int, Int)], offsetSeq: Iterable[Long]): Iterable[Long] = { for(((key, value), offset) <- keysAndValues.zip(offsetSeq)) yield log.appendAsFollower(messageWithOffset(key, value, offset)).lastOffset } private def invalidCleanedMessage(initialOffset: Long, keysAndValues: Iterable[(Int, Int)], codec: CompressionType = CompressionType.GZIP): MemoryRecords = { // this function replicates the old versions of the cleaner which under some circumstances // would write invalid compressed message sets with the outer magic set to 1 and the inner // magic set to 0 val records = keysAndValues.map(kv => LegacyRecord.create(RecordBatch.MAGIC_VALUE_V0, RecordBatch.NO_TIMESTAMP, kv._1.toString.getBytes, kv._2.toString.getBytes)) val buffer = ByteBuffer.allocate(math.min(math.max(records.map(_.sizeInBytes()).sum / 2, 1024), 1 << 16)) val builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V1, codec, TimestampType.CREATE_TIME, initialOffset) var offset = initialOffset records.foreach { record => builder.appendUncheckedWithOffset(offset, record) offset += 1 } builder.build() } private def messageWithOffset(key: Array[Byte], value: Array[Byte], offset: Long): MemoryRecords = MemoryRecords.withRecords(offset, CompressionType.NONE, 0, new SimpleRecord(key, value)) private def messageWithOffset(key: Int, value: Int, offset: Long): MemoryRecords = messageWithOffset(key.toString.getBytes, value.toString.getBytes, offset) private def makeLog(dir: File = dir, config: LogConfig = logConfig, recoveryPoint: Long = 0L) = Log(dir = dir, config = config, logStartOffset = 0L, recoveryPoint = recoveryPoint, scheduler = time.scheduler, time = time, brokerTopicStats = new BrokerTopicStats) private def noOpCheckDone(topicPartition: TopicPartition) { /* do nothing */ } private def makeCleaner(capacity: Int, checkDone: TopicPartition => Unit = noOpCheckDone, maxMessageSize: Int = 64*1024) = new Cleaner(id = 0, offsetMap = new FakeOffsetMap(capacity), ioBufferSize = maxMessageSize, maxIoBufferSize = maxMessageSize, dupBufferLoadFactor = 0.75, throttler = throttler, time = time, checkDone = checkDone) private def writeToLog(log: Log, seq: Iterable[(Int, Int)]): Iterable[Long] = { for((key, value) <- seq) yield log.appendAsLeader(record(key, value), leaderEpoch = 0).firstOffset } private def key(id: Int) = ByteBuffer.wrap(id.toString.getBytes) private def record(key: Int, value: Int, producerId: Long = RecordBatch.NO_PRODUCER_ID, producerEpoch: Short = RecordBatch.NO_PRODUCER_EPOCH, sequence: Int = RecordBatch.NO_SEQUENCE, partitionLeaderEpoch: Int = RecordBatch.NO_PARTITION_LEADER_EPOCH): MemoryRecords = { MemoryRecords.withIdempotentRecords(RecordBatch.CURRENT_MAGIC_VALUE, 0L, CompressionType.NONE, producerId, producerEpoch, sequence, partitionLeaderEpoch, new SimpleRecord(key.toString.getBytes, value.toString.getBytes)) } private def appendTransactionalAsLeader(log: Log, producerId: Long, producerEpoch: Short = 0): Seq[Int] => LogAppendInfo = { appendIdempotentAsLeader(log, producerId, producerEpoch, isTransactional = true) } private def appendIdempotentAsLeader(log: Log, producerId: Long, producerEpoch: Short 
= 0, isTransactional: Boolean = false): Seq[Int] => LogAppendInfo = { var sequence = 0 keys: Seq[Int] => { val simpleRecords = keys.map { key => val keyBytes = key.toString.getBytes new SimpleRecord(time.milliseconds(), keyBytes, keyBytes) // the value doesn't matter since we validate offsets } val records = if (isTransactional) MemoryRecords.withTransactionalRecords(CompressionType.NONE, producerId, producerEpoch, sequence, simpleRecords: _*) else MemoryRecords.withIdempotentRecords(CompressionType.NONE, producerId, producerEpoch, sequence, simpleRecords: _*) sequence += simpleRecords.size log.appendAsLeader(records, leaderEpoch = 0) } } private def commitMarker(producerId: Long, producerEpoch: Short, timestamp: Long = time.milliseconds()): MemoryRecords = endTxnMarker(producerId, producerEpoch, ControlRecordType.COMMIT, 0L, timestamp) private def abortMarker(producerId: Long, producerEpoch: Short, timestamp: Long = time.milliseconds()): MemoryRecords = endTxnMarker(producerId, producerEpoch, ControlRecordType.ABORT, 0L, timestamp) private def endTxnMarker(producerId: Long, producerEpoch: Short, controlRecordType: ControlRecordType, offset: Long, timestamp: Long): MemoryRecords = { val endTxnMarker = new EndTransactionMarker(controlRecordType, 0) MemoryRecords.withEndTransactionMarker(offset, timestamp, RecordBatch.NO_PARTITION_LEADER_EPOCH, producerId, producerEpoch, endTxnMarker) } private def record(key: Int, value: Array[Byte]): MemoryRecords = TestUtils.singletonRecords(key = key.toString.getBytes, value = value) private def unkeyedRecord(value: Int): MemoryRecords = TestUtils.singletonRecords(value = value.toString.getBytes) private def tombstoneRecord(key: Int): MemoryRecords = record(key, null) } class FakeOffsetMap(val slots: Int) extends OffsetMap { val map = new java.util.HashMap[String, Long]() var lastOffset = -1L private def keyFor(key: ByteBuffer) = new String(Utils.readBytes(key.duplicate), "UTF-8") override def put(key: ByteBuffer, offset: Long): Unit = { lastOffset = offset map.put(keyFor(key), offset) } override def get(key: ByteBuffer): Long = { val k = keyFor(key) if(map.containsKey(k)) map.get(k) else -1L } override def clear(): Unit = map.clear() override def size: Int = map.size override def latestOffset: Long = lastOffset override def updateLatestOffset(offset: Long): Unit = { lastOffset = offset } override def toString: String = map.toString }
airbnb/kafka
core/src/test/scala/unit/kafka/log/LogCleanerTest.scala
Scala
apache-2.0
53,074
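// A minimal, self-contained sketch of the compaction rule the tests above exercise: for each
// key, keep only the record with the highest offset. This deliberately avoids Kafka's
// OffsetMap/Cleaner API; all names and data below are illustrative only.
object CompactionSketch extends App {
  // (offset, key, value) triples standing in for log records
  val records = Seq((0L, "a", "v0"), (1L, "b", "v1"), (2L, "a", "v2"), (3L, "c", "v3"), (4L, "b", "v4"))

  // the "offset map": latest offset observed for each key
  val latestOffset: Map[String, Long] =
    records.foldLeft(Map.empty[String, Long]) { case (m, (offset, key, _)) => m.updated(key, offset) }

  // a cleaned log retains only records whose offset matches the latest offset for their key
  val cleaned = records.filter { case (offset, key, _) => latestOffset(key) == offset }
  println(cleaned) // List((2,a,v2), (3,c,v3), (4,b,v4))
}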
/* * Copyright 2015 - 2016 Red Bull Media House GmbH <http://www.redbullmediahouse.com> - all rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.rbmhtechnology.eventuate.crdt import akka.actor._ import akka.testkit._ import com.rbmhtechnology.eventuate.SingleLocationSpecLeveldb import com.rbmhtechnology.eventuate.utilities._ import org.scalatest._ class CRDTServiceSpecLeveldb extends TestKit(ActorSystem("test")) with WordSpecLike with Matchers with SingleLocationSpecLeveldb { "A CRDTService" must { "manage multiple CRDTs identified by name" in { val service = new CounterService[Int]("a", log) service.update("a", 1).await should be(1) service.update("b", 2).await should be(2) service.value("a").await should be(1) service.value("b").await should be(2) } "ignore events from CRDT services of different type" in { val service1 = new CounterService[Int]("a", log) val service2 = new MVRegisterService[Int]("b", log) val service3 = new LWWRegisterService[Int]("c", log) service1.update("a", 1).await should be(1) service2.set("a", 1).await should be(Set(1)) service3.set("a", 1).await should be(Some(1)) } } "A CounterService" must { "return the default value of a Counter" in { val service = new CounterService[Int]("a", log) service.value("a").await should be(0) } "increment a Counter" in { val service = new CounterService[Int]("a", log) service.update("a", 3).await should be(3) service.update("a", 2).await should be(5) service.value("a").await should be(5) } "decrement a Counter" in { val service = new CounterService[Int]("a", log) service.update("a", -3).await should be(-3) service.update("a", -2).await should be(-5) service.value("a").await should be(-5) } } "An MVRegisterService" must { "return the default value of an MVRegister" in { val service = new MVRegisterService[Int]("a", log) service.value("a").await should be(Set()) } "return the written value of an MVRegister" in { val service = new MVRegisterService[Int]("a", log) service.set("a", 1).await should be(Set(1)) service.value("a").await should be(Set(1)) } } "An LWWRegisterService" must { "return the default value of an LWWRegister" in { val service = new LWWRegisterService[Int]("a", log) service.value("a").await should be(None) } "return the written value of an LWWRegister" in { val service = new LWWRegisterService[Int]("a", log) service.set("a", 1).await should be(Some(1)) service.value("a").await should be(Some(1)) } } "An ORSetService" must { "return the default value of an ORSet" in { val service = new ORSetService[Int]("a", log) service.value("a").await should be(Set()) } "add an entry" in { val service = new ORSetService[Int]("a", log) service.add("a", 1).await should be(Set(1)) service.value("a").await should be(Set(1)) } "mask duplicates" in { val service = new ORSetService[Int]("a", log) service.add("a", 1).await should be(Set(1)) service.add("a", 1).await should be(Set(1)) service.value("a").await should be(Set(1)) } "remove an entry" in { val service = new ORSetService[Int]("a", log) service.add("a", 1).await should 
be(Set(1)) service.remove("a", 1).await should be(Set()) service.value("a").await should be(Set()) } "remove duplicates" in { val service = new ORSetService[Int]("a", log) service.add("a", 1).await should be(Set(1)) service.add("a", 1).await should be(Set(1)) service.remove("a", 1).await should be(Set()) service.value("a").await should be(Set()) } } "An ORCartService" must { "return the default value of an ORCart" in { val service = new ORCartService[String]("a", log) service.value("a").await should be(Map()) } "set initial entry quantities" in { val service = new ORCartService[String]("a", log) service.add("a", "123", 1).await should be(Map("123" -> 1)) service.add("a", "124", 1).await should be(Map("123" -> 1, "124" -> 1)) service.value("a").await should be(Map("123" -> 1, "124" -> 1)) } "increment existing entry quantities" in { val service = new ORCartService[String]("a", log) service.add("a", "123", 1).await should be(Map("123" -> 1)) service.add("a", "123", 1).await should be(Map("123" -> 2)) } "remove entries" in { val service = new ORCartService[String]("a", log) service.add("a", "123", 1).await should be(Map("123" -> 1)) service.add("a", "124", 1).await should be(Map("123" -> 1, "124" -> 1)) service.remove("a", "123").await should be(Map("124" -> 1)) service.value("a").await should be(Map("124" -> 1)) } "reject non-positive quantities" in { val service = new ORCartService[String]("a", log) intercept[IllegalArgumentException](service.add("a", "123", 0).await) intercept[IllegalArgumentException](service.add("a", "123", -1).await) } } }
ianclegg/eventuate
eventuate-crdt/src/it/scala/com/rbmhtechnology/eventuate/crdt/CRDTServiceSpecLeveldb.scala
Scala
apache-2.0
5,722
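// A minimal sketch of why the CounterService above converges: counter updates are plain
// additions, so replicas that observe the same update operations in different orders agree
// on the value. Illustration only; this is not Eventuate's Counter implementation.
object CounterConvergenceSketch extends App {
  val opsSeenByReplicaA = List(3, -2, 5)
  val opsSeenByReplicaB = List(5, 3, -2) // same operations, different delivery order
  assert(opsSeenByReplicaA.sum == opsSeenByReplicaB.sum) // both replicas read 6
}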
package mesosphere.marathon package stream import akka.actor.{ ActorRef, Props, Status } import akka.{ Done, NotUsed } import akka.stream.{ Graph, SinkShape, UniformFanOutShape } import akka.stream.scaladsl.{ SinkQueueWithCancel, Sink => AkkaSink } import org.reactivestreams.{ Publisher, Subscriber } import scala.collection.immutable import scala.collection.immutable.Seq import scala.concurrent.{ ExecutionContext, Future } import scala.util.Try /** * Extensions to Akka's Sink companion */ object Sink { def set[T]: AkkaSink[T, Future[immutable.Set[T]]] = { AkkaSink.fromGraph(new CollectionStage[T, immutable.Set[T]](immutable.Set.newBuilder[T])) } def sortedSet[T](implicit ordering: Ordering[T]): AkkaSink[T, Future[immutable.SortedSet[T]]] = { AkkaSink.fromGraph(new CollectionStage[T, immutable.SortedSet[T]](immutable.SortedSet.newBuilder[T])) } def map[K, V]: AkkaSink[(K, V), Future[immutable.Map[K, V]]] = { AkkaSink.fromGraph(new CollectionStage[(K, V), immutable.Map[K, V]](immutable.Map.newBuilder[K, V])) } def list[T]: AkkaSink[T, Future[List[T]]] = { AkkaSink.fromGraph(new CollectionStage[T, List[T]](List.newBuilder[T])) } // Akka's API def fromGraph[T, M](g: Graph[SinkShape[T], M]): AkkaSink[T, M] = AkkaSink.fromGraph(g) def fromSubscriber[T](subscriber: Subscriber[T]): AkkaSink[T, NotUsed] = AkkaSink.fromSubscriber(subscriber) def cancelled[T]: AkkaSink[T, NotUsed] = AkkaSink.cancelled def head[T]: AkkaSink[T, Future[T]] = AkkaSink.head def headOption[T]: AkkaSink[T, Future[Option[T]]] = AkkaSink.headOption def last[T]: AkkaSink[T, Future[T]] = AkkaSink.last[T] def lastOption[T]: AkkaSink[T, Future[Option[T]]] = AkkaSink.lastOption[T] def seq[T]: AkkaSink[T, Future[Seq[T]]] = AkkaSink.seq[T] def asPublisher[T](fanout: Boolean): AkkaSink[T, Publisher[T]] = AkkaSink.asPublisher[T](fanout) def ignore: AkkaSink[Any, Future[Done]] = AkkaSink.ignore def foreach[T](f: T => Unit): AkkaSink[T, Future[Done]] = AkkaSink.foreach[T](f) def combine[T, U]( first: AkkaSink[U, _], second: AkkaSink[U, _], rest: AkkaSink[U, _]*)(strategy: Int ⇒ Graph[UniformFanOutShape[T, U], NotUsed]): AkkaSink[T, NotUsed] = AkkaSink.combine[T, U](first, second, rest: _*)(strategy) def foreachParallel[T](parallelism: Int)(f: T ⇒ Unit)(implicit ec: ExecutionContext): AkkaSink[T, Future[Done]] = AkkaSink.foreachParallel[T](parallelism)(f) def fold[U, T](zero: U)(f: (U, T) ⇒ U): AkkaSink[T, Future[U]] = AkkaSink.fold[U, T](zero)(f) def reduce[T](f: (T, T) ⇒ T): AkkaSink[T, Future[T]] = AkkaSink.reduce(f) def onComplete[T](callback: Try[Done] => Unit): AkkaSink[T, NotUsed] = AkkaSink.onComplete(callback) def actorRef[T](ref: ActorRef, onCompleteMessage: Any): AkkaSink[T, NotUsed] = AkkaSink.actorRef(ref, onCompleteMessage) def actorRefWithAck[T](ref: ActorRef, onInitMessage: Any, ackMessage: Any, onCompleteMessage: Any, onFailureMessage: (Throwable) ⇒ Any = Status.Failure): AkkaSink[T, NotUsed] = AkkaSink.actorRefWithAck(ref, onInitMessage, ackMessage, onCompleteMessage, onFailureMessage) def actorSubscriber[T](props: Props): AkkaSink[T, ActorRef] = AkkaSink.actorSubscriber(props) def queue[T](): AkkaSink[T, SinkQueueWithCancel[T]] = AkkaSink.queue[T]() }
natemurthy/marathon
src/main/scala/mesosphere/marathon/stream/Sink.scala
Scala
apache-2.0
3,307
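// A usage sketch for the collection sinks above. It assumes only that Akka Streams is on the
// classpath and that an ActorSystem/ActorMaterializer can be created; the names below are
// illustrative and not part of Marathon.
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import mesosphere.marathon.stream.Sink
import scala.concurrent.Future

object SinkUsageSketch extends App {
  implicit val system: ActorSystem = ActorSystem("sink-sketch")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  // Sink.set deduplicates while collecting into an immutable Set
  val distinct: Future[Set[Int]] = Source(List(1, 2, 2, 3)).runWith(Sink.set[Int])

  // Sink.map collects key/value pairs into an immutable Map
  val lookup: Future[Map[String, Int]] = Source(List("a" -> 1, "b" -> 2)).runWith(Sink.map[String, Int])
}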
/*********************************************************************** * Copyright (c) 2013-2018 Commonwealth Computer Research, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, Version 2.0 * which accompanies this distribution and is available at * http://www.opensource.org/licenses/apache2.0.php. ***********************************************************************/ package org.locationtech.geomesa.index.conf import java.util.concurrent.TimeUnit import com.github.benmanes.caffeine.cache.Caffeine import org.geotools.feature.simple.SimpleFeatureTypeBuilder import org.locationtech.geomesa.features.SerializationOption.SerializationOptions import org.locationtech.geomesa.features.SimpleFeatureSerializer import org.locationtech.geomesa.features.kryo.{KryoFeatureSerializer, ProjectingKryoFeatureSerializer} import org.locationtech.geomesa.index.metadata.CachedLazyMetadata import org.locationtech.geomesa.index.planning.Transforms import org.locationtech.geomesa.utils.cache.CacheKeyGenerator import org.opengis.feature.simple.SimpleFeatureType import org.opengis.filter.Filter trait ColumnGroups[T <: AnyRef] { import org.locationtech.geomesa.utils.geotools.RichAttributeDescriptors.RichAttributeDescriptor import scala.collection.JavaConverters._ private val cache = { val expiry = CachedLazyMetadata.Expiry.toDuration.get.toMillis Caffeine.newBuilder().expireAfterWrite(expiry, TimeUnit.MILLISECONDS).build[String, Seq[(T, SimpleFeatureType)]]() } private lazy val defaultString = convert(default) private lazy val reservedStrings = reserved.map(convert) /** * Default column group, that contains all the attributes * * @return */ def default: T /** * Any additional column groups that are used internally * * @return */ protected def reserved: Set[T] /** * Convert a user-specified group into the appropriate type * * @param group group * @return */ protected def convert(group: String): T /** * Convert a group back into a user-specified string * * @param group group * @return */ protected def convert(group: T): String /** * Gets the column groups for a simple feature type. 
The default group will contain all columns * * @param sft simple feature type * @return */ def apply(sft: SimpleFeatureType): Seq[(T, SimpleFeatureType)] = { val key = CacheKeyGenerator.cacheKey(sft) var groups = cache.getIfPresent(key) if (groups == null) { val map = scala.collection.mutable.Map.empty[String, SimpleFeatureTypeBuilder] sft.getAttributeDescriptors.asScala.foreach { descriptor => descriptor.getColumnGroups().foreach { group => map.getOrElseUpdate(group, new SimpleFeatureTypeBuilder()).add(descriptor) } } val sfts = map.map { case (group, builder) => builder.setName(sft.getTypeName) val subset = builder.buildFeatureType() subset.getUserData.putAll(sft.getUserData) (convert(group), subset) } + (default -> sft) // return the smallest groups first groups = sfts.toSeq.sortBy(_._2.getAttributeCount) cache.put(key, groups) } groups } /** * Get serializers for each column group * * @param sft simple feature type * @return */ def serializers(sft: SimpleFeatureType): Seq[(T, SimpleFeatureSerializer)] = { apply(sft).map { case (colFamily, subset) => if (colFamily.eq(default)) { (colFamily, KryoFeatureSerializer(subset, SerializationOptions.withoutId)) } else { (colFamily, new ProjectingKryoFeatureSerializer(sft, subset, SerializationOptions.withoutId)) } } } /** * Find a column group that supports the given transform and filter * * @param sft simple feature type * @param transform transform definitions * @param ecql filter, if any * @return */ def group(sft: SimpleFeatureType, transform: String, ecql: Option[Filter]): (T, SimpleFeatureType) = { val definitions = Transforms.definitions(transform) val groups = apply(sft).iterator var group = groups.next // last group has all the columns, so just return the last one if nothing else matches while (groups.hasNext && !Transforms.supports(group._2, definitions, ecql)) { group = groups.next } group } /** * Validate that the column groups do not overlap with reserved column groups * * @param sft simple feature type */ def validate(sft: SimpleFeatureType): Unit = { // note: we validate against strings, as some col group types don't compare well (i.e. byte arrays) val groups = sft.getAttributeDescriptors.asScala.flatMap(_.getColumnGroups()).distinct groups.foreach { group => if (group == defaultString || reservedStrings.contains(group)) { throw new IllegalArgumentException(s"Column group '$group' is reserved for internal use - " + "please choose another name") } } } }
ddseapy/geomesa
geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/conf/ColumnGroups.scala
Scala
apache-2.0
5,122
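// A hypothetical binding of the ColumnGroups trait above for a byte-oriented datastore, shown
// only to illustrate the abstract members. The group name, encoding, and object name are
// invented for this sketch; real GeoMesa datastores supply their own implementations.
import java.nio.charset.StandardCharsets

import org.locationtech.geomesa.index.conf.ColumnGroups

object SketchColumnGroups extends ColumnGroups[Array[Byte]] {
  // the default group carries every attribute
  override val default: Array[Byte] = "d".getBytes(StandardCharsets.UTF_8)
  // no internally reserved groups in this sketch
  override protected def reserved: Set[Array[Byte]] = Set.empty
  // user-specified group names are plain strings stored as UTF-8 bytes
  override protected def convert(group: String): Array[Byte] = group.getBytes(StandardCharsets.UTF_8)
  override protected def convert(group: Array[Byte]): String = new String(group, StandardCharsets.UTF_8)
}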
//
// SupportForDOrc.scala -- Scala class SupportForDOrc
// Project OrcScala
//
// Created by jthywiss on Dec 21, 2015.
//
// Copyright (c) 2017 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//

package orc.run.extensions

import orc.run.Orc

/** SupportForDOrc adds facilities for distributed Orc to an Orc runtime engine
  *
  * @author jthywiss
  */
trait SupportForDOrc extends Orc {
}
orc-lang/orc
OrcScala/src/orc/run/extensions/SupportForDOrc.scala
Scala
bsd-3-clause
612
package com.ml

abstract class FitnessEval {
  // returns scale of output values
  val range: Double
  // the size of input vectors associated with this function
  val inputCount: Int
  // the size of expected output vectors
  val outputCount: Int
  // returns fitness of given function
  def apply(func: Seq[Double] => Seq[Double]): Double
}
BrettAM/EvComp
src/main/scala/ml/FitnessEval.scala
Scala
apache-2.0
355
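// A minimal concrete FitnessEval, assuming fitness is measured as mean squared error against
// a fixed single-input/single-output dataset. The dataset, scaling, and object name are
// invented for this sketch.
import com.ml.FitnessEval

object SquaredErrorEval extends FitnessEval {
  override val range: Double = 1.0
  override val inputCount: Int = 1
  override val outputCount: Int = 1

  // target function y = 2x, sampled at a few points
  private val samples: Seq[(Seq[Double], Seq[Double])] =
    Seq(Seq(0.0) -> Seq(0.0), Seq(1.0) -> Seq(2.0), Seq(2.0) -> Seq(4.0))

  // mean squared error of the candidate function over the samples (0.0 is a perfect fit)
  override def apply(func: Seq[Double] => Seq[Double]): Double = {
    val errors = samples.map { case (in, expected) =>
      func(in).zip(expected).map { case (o, e) => (o - e) * (o - e) }.sum
    }
    errors.sum / samples.size
  }
}
// e.g. SquaredErrorEval(xs => xs.map(_ * 2)) == 0.0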
package com.rackspace.prefs.model

import com.mchange.v2.c3p0.ComboPooledDataSource
import com.rackspace.prefs.model.DBTables._
import scala.slick.driver.JdbcDriver.simple._
import scala.slick.jdbc.JdbcBackend.Database
import org.joda.time.DateTime
import scala.io.Source

trait InitDbTrait {

  /**
   * When the tests are run from the command line with gradle (e.g. `gradle test`), they run
   * with reference to the app directory.
   *
   * When the tests are run from IntelliJ, they run with reference to the main project
   * directory. This code handles the difference.
   */
  private val jdbcUrl = {
    val userDir: String = System.getProperty("user.dir").replaceFirst("/$", "")
    if (userDir.endsWith("app"))
      s"jdbc:h2:file:$userDir/build/db/test/preferencesdb;MODE=PostgreSQL;IGNORECASE=TRUE"
    else
      s"jdbc:h2:file:$userDir/app/build/db/test/preferencesdb;MODE=PostgreSQL;IGNORECASE=TRUE"
  }

  val ds: ComboPooledDataSource = new ComboPooledDataSource
  ds.setJdbcUrl(jdbcUrl)

  val ArchivePrefsMetadataSlug = "archive"
  val archivingSchema = Source.fromInputStream(getClass.getResourceAsStream("/feeds_archives.schema.orderly")).mkString

  def getPreference(db: Database, id: String): Option[Preferences] = {
    db.withSession { implicit session =>
      preferences.filter(_.id === id).list match {
        case List(preferences: Preferences) => Some(preferences)
        case _ => None
      }
    }
  }

  def createPreference(db: Database, id: String, preferenceSlug: String, payload: String) {
    val currentTime = new DateTime()
    db.withSession { implicit session =>
      val prefsMetadataId = preferencesMetadata.filter(_.slug === preferenceSlug).map(_.id).run.head
      preferences
        .map(p => (p.id, p.preferencesMetadataId, p.payload, p.alternateId))
        .insert(id, prefsMetadataId, payload, Some("alternate_id"))
    }
  }

  def clearData(db: Database) {
    db.withSession { implicit session =>
      preferences.delete
    }
  }

  def dropMetadata(db: Database) {
    db.withSession { implicit session =>
      preferencesMetadata.ddl.drop
    }
  }

  def createMetadata(db: Database): Unit = {
    db.withSession { implicit session =>
      preferencesMetadata.ddl.create
      preferencesMetadata
        .map(pm => (pm.id, pm.slug, pm.description, pm.schema))
        .insert(1, ArchivePrefsMetadataSlug, "Cloud Feeds Archiving Preferences", archivingSchema)
    }
  }
}
fred5156/preferences-service
app/src/test/scala/com/rackspace/prefs/model/InitDbTrait.scala
Scala
apache-2.0
2,638
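// A usage sketch for InitDbTrait, assuming Slick 2.x on the classpath (as in the trait's own
// imports) and that the preferences tables already exist in the H2 file, which the trait
// itself assumes. The ids and payload below are invented for illustration.
import com.rackspace.prefs.model.InitDbTrait
import scala.slick.jdbc.JdbcBackend.Database

object PreferencesDbSketch extends InitDbTrait with App {
  val db = Database.forDataSource(ds)

  createMetadata(db)
  createPreference(db, "tenant-1", ArchivePrefsMetadataSlug, """{"enabled": true}""")
  println(getPreference(db, "tenant-1")) // Some(Preferences(...)) if the insert succeeded

  clearData(db)
  dropMetadata(db)
}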
package com.shorrockin.narrator import java.io.Serializable import collection.mutable.HashMap import utils.Logging object ActionStats extends Logging { } /** * holds statistics related to the operation of an action * * @author Chris Shorrock */ @SerialVersionUID(1L) class ActionStats(val description:String) extends Serializable { import ActionStats._ var iterations = 0L var totalTime = 0L var maxTime = 0L var minTime = 0L var exceptions = 0L var userExceptions = HashMap[String, Long]() private def iterFloat = iterations.asInstanceOf[Float] def userExceptionCount = userExceptions.foldLeft(0L) { _ + _._2 } def totalExceptions = exceptions + userExceptionCount def averageTime = noDivZero(iterations, 0F) { totalTime / iterFloat } def successRate = noDivZero(iterations, -1F) { ((iterations - totalExceptions) / iterFloat) * 100 } def totalExceptionRate = noDivZero(iterations, -1F) { (totalExceptions / iterFloat) * 100 } def exceptionRate = noDivZero(iterations, -1F) { (exceptions / iterFloat) * 100 } def userExceptionRate = noDivZero(iterations, -1F) { (userExceptionCount / iterFloat) * 100 } def requestRatePerSec = noDivZero(iterations, 0F) { (iterFloat / (totalTime / 1000F)) } private def noDivZero[E](test:Long, or:E)(f: => E):E = if (test == 0) or else f /** * executes the specified action and processes the results * so that they are saved. */ def gather(action:Action) = { val start = System.currentTimeMillis try { action.worker.get() } catch { case t:NarratorStoryException => reportUserException(t.id, 1) case t:Throwable => exceptions = exceptions + 1 logger.error("unexpected exception in story actor: " + t.getMessage, t) } finally { val end = System.currentTimeMillis iterations = iterations + 1 totalTime = totalTime + (end - start) maxTime = if (totalTime > maxTime) totalTime else maxTime minTime = if (totalTime < minTime) totalTime else minTime } } def reportUserException(id:String, count:Long) { val current = userExceptions.getOrElseUpdate(id, 0L) userExceptions.put(id, current + count) } } /** * holds statistics related to the operation of a story, and all the actions * contained within that story. * * @author Chris Shorrock */ @SerialVersionUID(1L) class StoryStats(val description:String) extends Serializable { def this(s:Story) = this(s.description) var stats = List[ActionStats]() /** * executes the specified action and processes the results so that they are saved * within this story. */ def gather(action:Action) = { val description = action.description val stat = find(description) match { case Some(s) => s case None => val s = new ActionStats(description) ; stats = s :: stats ; s } stat.gather(action) } /** * finds the state for the action with the specified description. */ def find(desc:String) = stats.find { _.description.equals(desc) } /** * merges the results of the story stats specified with this stat. 
*/ def merge(other:StoryStats) { if (other.description.equals(description)) { other.stats.foreach { stat => find(stat.description) match { case None => stats = stat :: stats case Some(s) => s.iterations = stat.iterations + s.iterations s.totalTime = stat.totalTime + s.totalTime s.exceptions = stat.exceptions + s.exceptions s.maxTime = if (stat.maxTime > s.maxTime) stat.maxTime else s.maxTime s.minTime = if (stat.minTime < s.minTime) stat.minTime else s.minTime stat.userExceptions.foreach { (tup) => s.reportUserException(tup._1, tup._2) } } } } else { throw new IllegalArgumentException("unable to merge story stats of two seperate descriptions %s and %s".format(description, other.description)) } } } @SerialVersionUID(1L) class WorkloadStats extends Serializable { var stats = List[StoryStats]() def merge(other:StoryStats):WorkloadStats = { stats.find { ss => ss.description.equals(other.description) } match { case None => stats = other :: stats case Some(i) => i.merge(other) } this } def merge(other:WorkloadStats):WorkloadStats = { merge(other.stats) } def merge(other:List[StoryStats]):WorkloadStats = { other.foreach { merge(_) } this } }
shorrockin/narrator
src/main/scala/ActionStats.scala
Scala
apache-2.0
4,678
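// A small worked example of the derived metrics above, assuming the classes in this file are
// on the classpath. The numbers are invented for illustration.
import com.shorrockin.narrator.ActionStats

object ActionStatsSketch extends App {
  val stats = new ActionStats("login")
  stats.iterations = 10
  stats.totalTime = 1500 // milliseconds across all iterations
  stats.exceptions = 1
  stats.reportUserException("timeout", 2)

  println(stats.averageTime)       // 150.0  (totalTime / iterations)
  println(stats.totalExceptions)   // 3      (1 unexpected + 2 user-reported)
  println(stats.successRate)       // 70.0   ((10 - 3) / 10 * 100)
  println(stats.requestRatePerSec) // ~6.67  (10 iterations in 1.5 seconds)
}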
package io.finch

import cats.Eq
import cats.instances.AllInstances
import cats.laws._
import cats.laws.discipline._
import org.scalacheck.{Arbitrary, Prop}
import org.typelevel.discipline.Laws

trait DecodePathLaws[A] extends Laws with MissingInstances with AllInstances {

  def capture: DecodePath[A]

  def roundTrip(a: A): IsEq[Option[A]] =
    capture(a.toString) <-> Some(a)

  def all(implicit A: Arbitrary[A], eq: Eq[A]): RuleSet = new DefaultRuleSet(
    name = "all",
    parent = None,
    "roundTrip" -> Prop.forAll { (a: A) => roundTrip(a) }
  )
}

object DecodePathLaws {
  def apply[A: DecodePath]: DecodePathLaws[A] = new DecodePathLaws[A] {
    def capture: DecodePath[A] = DecodePath[A]
  }
}
yanana/finch
core/src/test/scala/io/finch/DecodePathLaws.scala
Scala
apache-2.0
712
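// A sketch of wiring these laws into a test suite, assuming the discipline ScalaTest bridge
// is on the classpath and that finch provides a DecodePath[Int] instance; the suite name is
// invented for illustration.
import cats.instances.int._
import io.finch._
import org.scalatest.FunSuite
import org.typelevel.discipline.scalatest.Discipline

class DecodePathIntLawsSketch extends FunSuite with Discipline {
  // checks the roundTrip law for DecodePath[Int] with ScalaCheck-generated inputs
  checkAll("DecodePath[Int]", DecodePathLaws[Int].all)
}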
package ozmi.lambda_core
package lib

import org.kiama.rewriting.Rewriter._

object Integer extends TypeInstance {

  import Eq._
  import Ord._
  import Num._

  override lazy val evalRules = rule[Expr] {
    // Eq
    case Equal (Literal (a : BigInt), Literal (b : BigInt)) if (a compare b) == 0 => Bool.True
    case Equal (Literal (a : BigInt), Literal (b : BigInt)) if (a compare b) != 0 => Bool.False

    // Ord
    case LessThan (Literal (a : BigInt), Literal (b : BigInt)) => Literal (a < b)
    case LessThanOrEqual (Literal (a : BigInt), Literal (b : BigInt)) => Literal (a <= b)
    case GreaterThan (Literal (a : BigInt), Literal (b : BigInt)) => Literal (a > b)
    case GreaterThanOrEqual (Literal (a : BigInt), Literal (b : BigInt)) => Literal (a >= b)

    // Num
    case Add (Literal (a : BigInt), Literal (b : BigInt)) => Literal (a + b)
    case Subtract (Literal (a : BigInt), Literal (b : BigInt)) => Literal (a - b)
    case Multiply (Literal (a : BigInt), Literal (b : BigInt)) => Literal (a * b)
  }
}
ozmi/lambda_core
src/main/scala/ozmi/lambda_core/lib/Integer.scala
Scala
mit
1,152
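For illustration only, the strategy above can be applied with Kiama's rewrite combinator. This sketch assumes the AST constructors are importable as the file's own imports suggest (Literal from the core package, Add from the Num object); applying evalRules to an addition of two BigInt literals should fold it in one step, and rewrite returns the term unchanged when no rule matches:

import org.kiama.rewriting.Rewriter.rewrite
import ozmi.lambda_core.lib.Integer
import ozmi.lambda_core.Literal      // assumed location of the Expr AST
import ozmi.lambda_core.Num.Add      // assumed location, mirroring `import Num._` above

// Folds 2 + 3 down to Literal(5).
val folded = rewrite (Integer.evalRules) (Add (Literal (BigInt (2)), Literal (BigInt (3))))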
package dreamer.context import scala.util.Random import scala.collection.GenTraversableOnce import scalaz._, Scalaz._ import util.forked._, ForkedState._ import dreamer.concept._ import util.Util._ import Concept._ import Relation._ case class Context( val mind: MentalMap, val r: Random=new Random, val refList: List[Context.Ref]=Nil, val it: Option[Context.Ref]=None, val justLookedAt: Option[Context.Ref]=None) { def ref(ref: Context.Ref): Context = ref.real match { case Realized(_) => this.copy(refList= (ref :: refList).distinct) case _ => this } } object Context { case class Ref(val real: Concept, val arche: Concept) var extraSpecification = false def ref(c: Concept): State[Context,Concept] = c match { case Realized(_) => for { ctx: Context <- init val ref = Ref(c, archetype(ctx,c)) _ <- put(ctx.ref(ref)) } yield c case _ => state(c) } def setIt(c: Concept): State[Context,Concept] = c match { case Realized(_) => for { ctx: Context <- init val ref = Ref(c, archetype(ctx,c)) _ <- put(ctx.copy(it=Some(ref))) } yield c case _ => state(c) } def clearIt: State[Context,Unit] = for { ctx:Context <- get val ctx1 = ctx.copy(it=None) _ <- put(ctx1) } yield () def getIt: ForkedState[Context,Concept] = for (ctx <- fget; _ <- continueIf(!ctx.it.isEmpty)) yield ctx.it.get.real def isIt(ctx: Context, c: Concept): Boolean = ctx.it match { case Some(ref) => ref.real == c case _ => false } def isReffed(c: Concept): State[Context,Boolean] = for { ctx <- init } yield isReffed(ctx, c) def isReffed(ctx: Context, c: Concept): Boolean = { (ctx.refList.map(_.real) contains c) } def archetype(ctx: Context, real: Concept): Concept = { val results = ctx.mind.ask(Question(real, IsA, What)).map(_.end) if (results.size == 0) Thing else ctx.r.shuffle(results.toList).head } def archetype[T](ctx: Context, qf: QFragment[T]): QFragment[T] = qf match { case x: Realized => archetype(ctx, x) case _ => qf } def superkind[T](ctx: Context, qf: Concept): Concept = qf match { case x: Realized => {Thing} case x: Abstract => val results = ctx.mind.searchWhat(Question(x, IsA, What)) if (results.size == 0) qf else ctx.r.shuffle(results.toList).head case _ => qf } def superkind[T](ctx: Context, qf: QFragment[T]): QFragment[T] = qf match { case x: Concept => superkind(ctx, x) case _ => qf } def archetype(real: Concept): State[Context,Concept] = for (ctx <- init) yield archetype(ctx, real) def archetype[T](qf: QFragment[T]): State[Context,QFragment[T]] = for (ctx <- init) yield archetype(ctx, qf) def ask[T](q: Question[T]): State[Context,List[Edge]] = State{ctx => (ctx, ctx.mind.ask(q).toList)} def search[T](qs: Question[T]*): State[Context,List[Map[T,Concept]]] = State{ctx => (ctx, ctx.mind.search(qs:_*).toList)} def searchWhat(qs: Question[Unit]*): State[Context,List[Concept]] = State{ctx => (ctx, ctx.mind.searchWhat(qs:_*).toList)} def isAwake(ctx: Context): Boolean = !ctx.mind.ask(Question(Self,HasState,Awake)).isEmpty // Sometimes you can only reify one answer: e.g. if you ask where a house is // we want only one answer: street. 
Otherwise we could leave a house and // the dreamer would be in multiple locations private def reificationLimit[T](q: Question[T]) = q match { case Question(_, IsA, Variable(_)) => Some(1) case Question(_, AtLocation, Variable(_)) => Some(1) case Question(_, NextTo(_), _) => Some(1) case _ => None } // Ask a question, but if there are no answers, then dream some up def reifyingAsk[T](q: Question[T]): State[Context,List[Edge]] = State(ctx => reifyingAsk(ctx, q)) private def reifyingAsk[T](ctx: Context, q: Question[T]): (Context,List[Edge]) = { val current = ctx.mind.search(q) def defaultResult = for { mapping <- current.toList edge <- ctx.mind.ask(q.assign(mapping.get)) } yield edge // special reification control: def search(ctx: Context, q: Question[T]): Set[Map[T,Concept]] = q match { case Question(Variable(x), NextTo(_), Abstract(y)) => Set(Map(x -> Abstract(y))) case _ => ctx.mind.search(q) } def dreamWeirdDefaults(ctx: Context, q: Question[T]): List[Map[T,Concept]] = // Called if there are no example AtLocations for somewhere. // We return random Things to fill the empty space. q match { case Question(Variable(x), AtLocation, _) => ctx.mind.search(Question(Variable(x), IsA, Thing)).toList case _ => List() } def extendRealizations(e: Edge): List[Edge] = // Produce extra edges that must also exist for the new edge to be valid e match { case Edge(x,HasA,y) => Edge(y,AtLocation,x) :: e :: Nil case _ => e :: Nil } def dreamUpResult: (Context,List[Edge]) = { debug("Question "+q.toString+" yielded no results") val absQ = q.map(archetype(ctx, _)) debug(" Abstracted question: "+absQ.toString) val possibilities0: List[Map[T,Concept]] = if (extraSpecification) { val absQ2 = q.map(qf => superkind(ctx, archetype(ctx, qf))) debug(" Even more abstract question: "+absQ2.toString) (search(ctx, absQ) | search(ctx, absQ2)).toList } else { search(ctx, absQ).toList } debug(" Yielded "+possibilities0.size+" results") val weird = possibilities0.size == 0 && ctx.r.nextBoolean val possibilities = if (weird) dreamWeirdDefaults(ctx, absQ) else possibilities0 // reify a random subset of them val count = math.max((if (weird) 2 else reificationLimit(q) match { case Some(1) => 1 case Some(x) => math.max(0, ctx.r.nextInt(x-2) + 2) case None => ctx.r.nextInt(4) + 2 }) - current.size, 0) debug(" Reifying "+count) val archetypes: List[Map[T,Concept]] = ctx.r.shuffle(possibilities).take(count) debug(" They are: "+archetypes.toString) // reify and add them to the map archetypes.foldLeft((ctx,List[Edge]()))( (ctx_acc, mapping: Map[T,Concept]) => { val (ctx, acc) = ctx_acc val (ctx1, reifyMap) = reify(ctx, mapping.values) val mind = ctx1.mind val edge = q.toEdge(t => for (abs <- mapping.get(t); real <- reifyMap.get(abs)) yield real) val mind1 = edge match { case Some(e) => extendRealizations(e).foldLeft(mind)((a,b)=>a+b) case _ => mind } (ctx1.copy(mind=mind1), edge match { case Some(x) => x::acc case None => acc }) }) } val min = reificationLimit(q) match { case Some(x) => x-1 case None => 2 } if (current.size > min || isAwake(ctx)) { (ctx, defaultResult) } else q match { case Question(Realized(_), _, _) => val (ctx1,r) = dreamUpResult (ctx1, defaultResult ++ r) case Question(_, _, Realized(_)) => val (ctx1,r) = dreamUpResult (ctx1, defaultResult ++ r) case _ => (ctx, defaultResult) } } def reifyingSearch(q: Question[Unit]): State[Context,List[Concept]] = { require(q match { case Question(What,_,What) => false case Question(What,_,_) => true case Question(_,_,What) => true case Question(_,_,_) => false }) for (edgeSet <- 
reifyingAsk(q)) yield { for { edge <- edgeSet val unification = q.unify(edge) if !unification.isEmpty } yield unification.get.get(()).get } } def reifyingSearchAll(q: Question[Unit]): ForkedState[Context,List[Concept]] = fork(reifyingSearch(q)) def reifyingSearch1(q: Question[Unit]): ForkedState[Context,Concept] = refork(reifyingSearch(q)) def reify(c: Concept): State[Context,Concept] = State(ctx => reify(ctx, c)) private def reify(ctx: Context, c: Concept): (Context, Concept) = { val arche = if (extraSpecification) { val specifics = ctx.mind.searchWhat(Question(What,IsA,c)) if (extraSpecification && specifics.size > 4) { val specific = ctx.r.shuffle(specifics).head debug("Original arche was "+c.toString+"; making more specific "+ specific.toString) specific } else c } else c val (mind, real) = ctx.mind.reify(arche) (ctx.copy(mind=mind), real) } private def reify(ctx: Context, concepts: GenTraversableOnce[Concept]) : (Context, Map[Concept,Concept]) = concepts.foldLeft((ctx,Map[Concept,Concept]()))((acc, concept) => { val (ctx1, cmap) = acc val (ctx2, real) = reify(ctx1, concept) (ctx2, cmap + (concept -> real)) }) def tell(e: Edge): State[Context,Edge] = State(ctx => tell(ctx, e)) private def tell(ctx: Context, e: Edge): (Context, Edge) = { val mind = ctx.mind + e (ctx.copy(mind=mind), e) } def forget(e: Edge): State[Context,Unit] = State(ctx => (forget(ctx, e), ())) def forget(ctx: Context, e: Edge): Context = { val mind = ctx.mind - e ctx.copy(mind=mind) } }
tcoxon/dreamer
src/dreamer/context.scala
Scala
mit
9,363
package jp.ac.nagoya_u.dsmoq.sdk.request.json private[request] case class SetGuestAccessLevelJson(accessLevel: Int) extends Jsonable
nkawa/dsmoq
sdk/src/main/java/jp/ac/nagoya_u/dsmoq/sdk/request/json/SetGuestAccessLevelJson.scala
Scala
apache-2.0
134
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.deploy.k8s.submit.submitsteps import io.fabric8.kubernetes.api.model.{ContainerBuilder, EnvVarBuilder, EnvVarSourceBuilder, PodBuilder, QuantityBuilder} import scala.collection.JavaConverters._ import org.apache.spark.SparkConf import org.apache.spark.deploy.k8s.ConfigurationUtils import org.apache.spark.deploy.k8s.config._ import org.apache.spark.deploy.k8s.constants._ /** * Represents the initial setup required for the driver. */ private[spark] class BaseDriverConfigurationStep( kubernetesAppId: String, kubernetesResourceNamePrefix: String, driverLabels: Map[String, String], dockerImagePullPolicy: String, appName: String, mainClass: String, appArgs: Array[String], submissionSparkConf: SparkConf) extends DriverConfigurationStep { private val kubernetesDriverPodName = submissionSparkConf.get(KUBERNETES_DRIVER_POD_NAME) .getOrElse(s"$kubernetesResourceNamePrefix-driver") private val driverExtraClasspath = submissionSparkConf.get( org.apache.spark.internal.config.DRIVER_CLASS_PATH) // CPU settings private val driverCpuCores = submissionSparkConf.getOption("spark.driver.cores").getOrElse("1") private val driverLimitCores = submissionSparkConf.get(KUBERNETES_DRIVER_LIMIT_CORES) // Memory settings private val driverMemoryMiB = submissionSparkConf.get( org.apache.spark.internal.config.DRIVER_MEMORY) private val driverMemoryString = submissionSparkConf.get( org.apache.spark.internal.config.DRIVER_MEMORY.key, org.apache.spark.internal.config.DRIVER_MEMORY.defaultValueString) private val memoryOverheadMiB = submissionSparkConf .get(KUBERNETES_DRIVER_MEMORY_OVERHEAD) .getOrElse(math.max((MEMORY_OVERHEAD_FACTOR * driverMemoryMiB).toInt, MEMORY_OVERHEAD_MIN_MIB)) private val driverContainerMemoryWithOverheadMiB = driverMemoryMiB + memoryOverheadMiB private val driverDockerImage = submissionSparkConf.get(DRIVER_DOCKER_IMAGE) override def configureDriver( driverSpec: KubernetesDriverSpec): KubernetesDriverSpec = { val driverExtraClasspathEnv = driverExtraClasspath.map { classPath => new EnvVarBuilder() .withName(ENV_SUBMIT_EXTRA_CLASSPATH) .withValue(classPath) .build() } val driverCustomAnnotations = ConfigurationUtils .parsePrefixedKeyValuePairs( submissionSparkConf, KUBERNETES_DRIVER_ANNOTATION_PREFIX, "annotation") require(!driverCustomAnnotations.contains(SPARK_APP_NAME_ANNOTATION), s"Annotation with key $SPARK_APP_NAME_ANNOTATION is not allowed as it is reserved for" + s" Spark bookkeeping operations.") val driverCustomEnvs = submissionSparkConf.getAllWithPrefix(KUBERNETES_DRIVER_ENV_KEY).toSeq .map(env => new EnvVarBuilder() .withName(env._1) .withValue(env._2) .build()) val allDriverAnnotations = driverCustomAnnotations ++ Map(SPARK_APP_NAME_ANNOTATION -> appName) val nodeSelector = 
ConfigurationUtils.parsePrefixedKeyValuePairs( submissionSparkConf, KUBERNETES_NODE_SELECTOR_PREFIX, "node selector") val driverCpuQuantity = new QuantityBuilder(false) .withAmount(driverCpuCores) .build() val driverMemoryQuantity = new QuantityBuilder(false) .withAmount(s"${driverMemoryMiB}Mi") .build() val driverMemoryLimitQuantity = new QuantityBuilder(false) .withAmount(s"${driverContainerMemoryWithOverheadMiB}Mi") .build() val maybeCpuLimitQuantity = driverLimitCores.map { limitCores => ("cpu", new QuantityBuilder(false).withAmount(limitCores).build()) } val driverContainer = new ContainerBuilder(driverSpec.driverContainer) .withName(DRIVER_CONTAINER_NAME) .withImage(driverDockerImage) .withImagePullPolicy(dockerImagePullPolicy) .addAllToEnv(driverCustomEnvs.asJava) .addToEnv(driverExtraClasspathEnv.toSeq: _*) .addNewEnv() .withName(ENV_DRIVER_MEMORY) .withValue(driverMemoryString) .endEnv() .addNewEnv() .withName(ENV_DRIVER_MAIN_CLASS) .withValue(mainClass) .endEnv() .addNewEnv() .withName(ENV_DRIVER_ARGS) .withValue(appArgs.mkString(" ")) .endEnv() .addNewEnv() .withName(ENV_DRIVER_BIND_ADDRESS) .withValueFrom(new EnvVarSourceBuilder() .withNewFieldRef("v1", "status.podIP") .build()) .endEnv() .withNewResources() .addToRequests("cpu", driverCpuQuantity) .addToRequests("memory", driverMemoryQuantity) .addToLimits("memory", driverMemoryLimitQuantity) .addToLimits(maybeCpuLimitQuantity.toMap.asJava) .endResources() .build() val baseDriverPod = new PodBuilder(driverSpec.driverPod) .editOrNewMetadata() .withName(kubernetesDriverPodName) .addToLabels(driverLabels.asJava) .addToAnnotations(allDriverAnnotations.asJava) .endMetadata() .withNewSpec() .withRestartPolicy("Never") .withNodeSelector(nodeSelector.asJava) .endSpec() .build() val resolvedSparkConf = driverSpec.driverSparkConf.clone() .setIfMissing(KUBERNETES_DRIVER_POD_NAME, kubernetesDriverPodName) .set("spark.app.id", kubernetesAppId) .set(KUBERNETES_EXECUTOR_POD_NAME_PREFIX, kubernetesResourceNamePrefix) driverSpec.copy( driverPod = baseDriverPod, driverSparkConf = resolvedSparkConf, driverContainer = driverContainer) } }
publicRoman/spark
resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/submitsteps/BaseDriverConfigurationStep.scala
Scala
apache-2.0
6,363
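A sketch of the step in use. Because the class is private[spark], this would sit somewhere under the org.apache.spark package, and the starting KubernetesDriverSpec (initialSpec below) is hypothetical, normally produced by the surrounding submission client before the configuration steps run:

import org.apache.spark.SparkConf

val step = new BaseDriverConfigurationStep(
  kubernetesAppId = "spark-pi-0001",
  kubernetesResourceNamePrefix = "spark-pi-0001",
  driverLabels = Map("spark-role" -> "driver"),
  dockerImagePullPolicy = "IfNotPresent",
  appName = "spark-pi",
  mainClass = "org.apache.spark.examples.SparkPi",
  appArgs = Array("100"),
  submissionSparkConf = new SparkConf(false))

// configureDriver returns a copy of the spec with the driver pod, container
// and SparkConf filled in; initialSpec is the assumed starting spec.
val configured = step.configureDriver(initialSpec)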
package org.jetbrains.plugins.scala.lang.dfa.analysis.tests import org.jetbrains.plugins.scala.lang.dfa.Messages.{ConditionAlwaysFalse, ConditionAlwaysTrue, InvocationIndexOutOfBounds} import org.jetbrains.plugins.scala.lang.dfa.analysis.ScalaDfaTestBase class ReferenceExpressionsDfaTest extends ScalaDfaTestBase { def testIgnoringReferencesToMethodArgs(): Unit = test(codeFromMethodBody(returnType = "Int") { """ |var x = 15 |val z = arg1 + x * arg2 |x == 15 |val zz = arg333 + x |x == 14 + 1 |""".stripMargin })( "x == 15" -> ConditionAlwaysTrue ) def testAccessingCaseClassParameters(): Unit = test { """ |object Test { | case class Person(age: Int, grades: List[Int]) | | def main(): Int = { | val grades = List(3, 4, 1) | val p1 = Person(22, grades) | val p2 = Person(30, grades) | p1.age == 22 | p2.age == 30 | p1.grades(5) | } |} |""".stripMargin }( "p1.age == 22" -> ConditionAlwaysTrue, "p2.age == 30" -> ConditionAlwaysTrue, "p1.grades(5)" -> InvocationIndexOutOfBounds ) def testAccessingRegularClassParameters(): Unit = test { """ |object Test { | class Person(val age: Int, val grades: List[Int]) | | def main(p2: Person): Int = { | val grades = List(3, 4, 1) | val p1 = new Person(22, grades) | val p2 = new Person(30, grades) | p1 == p2 // test if p1 is not assigned with a wrong type | p2.age == 30 | p1.age == 22 | p1.grades(5) | } |} |""".stripMargin }( "p2.age == 30" -> ConditionAlwaysTrue, "p1.age == 22" -> ConditionAlwaysTrue, "p1.grades(5)" -> InvocationIndexOutOfBounds ) def testCopyingReferenceValueDirectly(): Unit = test(codeFromMethodBody(returnType = "Int") { """ |val x = 15 |val y = x |val z = y |z == 15 |""".stripMargin })( "z == 15" -> ConditionAlwaysTrue ) def testSuppressingWarningsForSomeNamedReferences(): Unit = test(codeFromMethodBody(returnType = "Int") { """ |val x = 2 | |val verbose = true |if (verbose) { | val argCount = 0 | foo(verbose, argCount) |} else { | 3 |} | |x == 2 |""".stripMargin })( "x == 2" -> ConditionAlwaysTrue ) def testNestedQualifiedExpressions(): Unit = test(codeFromMethodBody(returnType = "Boolean") { """ |case class User(age: Int, bestFriend: Person) | |val u1 = User(20, new Person(33)) | |val p1 = new Person(55) |p1.id == 55 |val u2 = User(23, p1) |u2.bestFriend == p1 | |u1.bestFriend.id > 30 | |u1.bestFriend.id == u2.bestFriend.id |u1.bestFriend.id < u2.bestFriend.id | |u1.age == 23 | |""".stripMargin })( "p1.id == 55" -> ConditionAlwaysTrue, "u2.bestFriend == p1" -> ConditionAlwaysTrue, "u1.age == 23" -> ConditionAlwaysFalse ) }
JetBrains/intellij-scala
scala/scala-impl/test/org/jetbrains/plugins/scala/lang/dfa/analysis/tests/ReferenceExpressionsDfaTest.scala
Scala
apache-2.0
3,081
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.deploy.master import scala.annotation.tailrec import org.apache.spark.SparkConf import org.apache.spark.internal.Logging import org.apache.spark.util.{IntParam, Utils} /** * Command-line parser for the master. */ private[master] class MasterArguments(args: Array[String], conf: SparkConf) extends Logging { var host = Utils.localHostName() var port = 7077 var webUiPort = 8080 var propertiesFile: String = null // Check for settings in environment variables if (System.getenv("SPARK_MASTER_IP") != null) { logWarning("SPARK_MASTER_IP is deprecated, please use SPARK_MASTER_HOST") host = System.getenv("SPARK_MASTER_IP") } if (System.getenv("SPARK_MASTER_HOST") != null) { host = System.getenv("SPARK_MASTER_HOST") } if (System.getenv("SPARK_MASTER_PORT") != null) { port = System.getenv("SPARK_MASTER_PORT").toInt } if (System.getenv("SPARK_MASTER_WEBUI_PORT") != null) { webUiPort = System.getenv("SPARK_MASTER_WEBUI_PORT").toInt } parse(args.toList) // This mutates the SparkConf, so all accesses to it must be made after this line propertiesFile = Utils.loadDefaultSparkProperties(conf, propertiesFile) if (conf.contains("spark.master.ui.port")) { webUiPort = conf.get("spark.master.ui.port").toInt } @tailrec private def parse(args: List[String]): Unit = args match { case ("--ip" | "-i") :: value :: tail => Utils.checkHost(value, "ip no longer supported, please use hostname " + value) host = value parse(tail) case ("--host" | "-h") :: value :: tail => Utils.checkHost(value, "Please use hostname " + value) host = value parse(tail) case ("--port" | "-p") :: IntParam(value) :: tail => port = value parse(tail) case "--webui-port" :: IntParam(value) :: tail => webUiPort = value parse(tail) case ("--properties-file") :: value :: tail => propertiesFile = value parse(tail) case ("--help") :: tail => printUsageAndExit(0) case Nil => // No-op case _ => printUsageAndExit(1) } /** * Print usage and exit JVM with the given exit code. */ private def printUsageAndExit(exitCode: Int) { // scalastyle:off println System.err.println( "Usage: Master [options]\\n" + "\\n" + "Options:\\n" + " -i HOST, --ip HOST Hostname to listen on (deprecated, please use --host or -h) \\n" + " -h HOST, --host HOST Hostname to listen on\\n" + " -p PORT, --port PORT Port to listen on (default: 7077)\\n" + " --webui-port PORT Port for web UI (default: 8080)\\n" + " --properties-file FILE Path to a custom Spark properties file.\\n" + " Default is conf/spark-defaults.conf.") // scalastyle:on println System.exit(exitCode) } }
sh-cho/cshSpark
deploy/master/MasterArguments.scala
Scala
apache-2.0
3,657
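Since MasterArguments is private[master], a usage sketch has to live in the same package; with none of the SPARK_MASTER_* environment variables set, the defaults and the parsed flags come out as below:

package org.apache.spark.deploy.master

import org.apache.spark.SparkConf

object MasterArgumentsExample {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf(loadDefaults = false)
    val parsed = new MasterArguments(
      Array("--host", "master.example.com", "--webui-port", "9090"), conf)

    assert(parsed.host == "master.example.com")
    assert(parsed.port == 7077)          // default, --port not given
    assert(parsed.webUiPort == 9090)
  }
}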
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.datasources.v2.orc import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.Path import org.apache.spark.sql.SparkSession import org.apache.spark.sql.catalyst.expressions.Expression import org.apache.spark.sql.connector.read.PartitionReaderFactory import org.apache.spark.sql.execution.datasources.PartitioningAwareFileIndex import org.apache.spark.sql.execution.datasources.v2.FileScan import org.apache.spark.sql.sources.Filter import org.apache.spark.sql.types.StructType import org.apache.spark.sql.util.CaseInsensitiveStringMap import org.apache.spark.util.SerializableConfiguration case class OrcScan( sparkSession: SparkSession, hadoopConf: Configuration, fileIndex: PartitioningAwareFileIndex, dataSchema: StructType, readDataSchema: StructType, readPartitionSchema: StructType, options: CaseInsensitiveStringMap, pushedFilters: Array[Filter], partitionFilters: Seq[Expression] = Seq.empty, dataFilters: Seq[Expression] = Seq.empty) extends FileScan { override def isSplitable(path: Path): Boolean = true override def createReaderFactory(): PartitionReaderFactory = { val broadcastedConf = sparkSession.sparkContext.broadcast( new SerializableConfiguration(hadoopConf)) // The partition values are already truncated in `FileScan.partitions`. // We should use `readPartitionSchema` as the partition schema here. OrcPartitionReaderFactory(sparkSession.sessionState.conf, broadcastedConf, dataSchema, readDataSchema, readPartitionSchema) } override def equals(obj: Any): Boolean = obj match { case o: OrcScan => super.equals(o) && dataSchema == o.dataSchema && options == o.options && equivalentFilters(pushedFilters, o.pushedFilters) case _ => false } override def hashCode(): Int = getClass.hashCode() override def description(): String = { super.description() + ", PushedFilters: " + seqToString(pushedFilters) } override def withFilters( partitionFilters: Seq[Expression], dataFilters: Seq[Expression]): FileScan = this.copy(partitionFilters = partitionFilters, dataFilters = dataFilters) }
kevinyu98/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/orc/OrcScan.scala
Scala
apache-2.0
2,986
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** @author John Miller * @builder scalation.linalgebra.mem_mapped.bld.BldMatri * @version 1.2 * @date Mon Sep 28 11:18:16 EDT 2015 * @see LICENSE (MIT style license file). */ package scalation.linalgebra.mem_mapped import math.{abs => ABS} import scalation.math.double_exp import scalation.util.{Error, MM_ArrayD} //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `MatriD` trait specifies the operations to be defined by the concrete * classes implementing `Double` matrices, i.e., * MatrixD - dense matrix * BidMatrixD - bidiagonal matrix - useful for computing Singular Values * SparseMatrixD - sparse matrix - majority of elements should be zero * SymTriMatrixD - symmetric triangular matrix - useful for computing Eigenvalues * par.MatrixD - parallel dense matrix * par.SparseMatrixD - parallel sparse matrix * Some of the classes provide a few custom methods, e.g., methods beginning with "times" * or ending with "npp". *------------------------------------------------------------------------------ * row-wise column-wise * Append: matrix +: vector matrix +:^ vector * Concatenate: matrix ++ matrix matrix ++^ matrix */ trait MatriD extends Error { /** Matrix dimension 1 (# rows) */ val dim1: Int /** Matrix dimension 2 (# columns) */ val dim2: Int /** Range for the storage array on dimension 1 (rows) */ protected val range1 = 0 until dim1 /** Range for the storage array on dimension 2 (columns) */ protected val range2 = 0 until dim2 /** Format string used for printing vector values (change using setFormat) */ protected var fString = "%g,\\t" //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Set the format to the 'newFormat'. * @param newFormat the new format string */ def setFormat (newFormat: String) { fString = newFormat } //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Get 'this' matrix's element at the 'i,j'-th index position. * @param i the row index * @param j the column index */ def apply (i: Int, j: Int): Double //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Get 'this' matrix's vector at the 'i'-th index position (i-th row). * @param i the row index */ def apply (i: Int): VectorD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Get a slice 'this' matrix row-wise on range 'ir' and column-wise on range 'jr'. * Ex: b = a(2..4, 3..5) * @param ir the row range * @param jr the column range */ def apply (ir: Range, jr: Range): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Get a slice 'this' matrix row-wise on range ir and column-wise at index j. * Ex: u = a(2..4, 3) * @param ir the row range * @param j the column index */ def apply (ir: Range, j: Int): VectorD = col(j)(ir) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Get a slice 'this' matrix row-wise at index 'i' and column-wise on range 'jr'. * Ex: u = a(2, 3..5) * @param i the row index * @param jr the column range */ def apply (i: Int, jr: Range): VectorD = this(i)(jr) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Set 'this' matrix's element at the 'i,j'-th index position to the scalar 'x'. 
* @param i the row index * @param j the column index * @param x the scalar value to assign */ def update (i: Int, j: Int, x: Double) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Set 'this' matrix's row at the 'i'-th index position to the vector 'u'. * @param i the row index * @param u the vector value to assign */ def update (i: Int, u: VectorD) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Set a slice of 'this' matrix row-wise on range 'ir' and column-wise on * range 'jr' to matrix 'b'. * Ex: a(2..4, 3..5) = b * @param ir the row range * @param jr the column range * @param b the matrix to assign */ def update (ir: Range, jr: Range, b: MatriD) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Set a slice of 'this' matrix row-wise on range 'ir' and column-wise at * index 'j' to vector 'u'. * Ex: a(2..4, 3) = u * @param ir the row range * @param j the column index * @param u the vector to assign */ def update (ir: Range, j: Int, u: VectorD) { col(j)(ir) = u } //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Set a slice of 'this' matrix row-wise at index 'i' and column-wise on range * 'jr' to vector 'u'. * Ex: a(2, 3..5) = u * @param i the row index * @param jr the column range * @param u the vector to assign */ def update (i: Int, jr: Range, u: VectorD) { this(i)(jr) = u } //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Set all the elements in 'this' matrix to the scalar 'x'. * @param x the scalar value to assign */ def set (x: Double) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Set the values in 'this' matrix as copies of the values in 2D array 'u'. * @param u the 2D array of values to assign */ def set (u: Array [Array [Double]]) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Set 'this' matrix's 'i'th row starting a column 'j' to the vector 'u'. * @param i the row index * @param u the vector value to assign * @param j the starting column index */ def set (i: Int, u: VectorD, j: Int = 0) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Iterate over 'this' matrix row by row applying method 'f'. * @param f the function to apply */ def foreach [U] (f: MM_ArrayD => U) { var i = 0 while (i < dim1) { f (this(i)()); i += 1 } } // foreach //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Slice 'this' matrix row-wise 'from' to 'end'. * @param from the start row of the slice (inclusive) * @param end the end row of the slice (exclusive) */ def slice (from: Int, end: Int): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Slice 'this' matrix column-wise 'from' to 'end'. * @param from the start column of the slice (inclusive) * @param end the end column of the slice (exclusive) */ def sliceCol (from: Int, end: Int): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Slice 'this' matrix row-wise 'r_from' to 'r_end' and column-wise 'c_from' to 'c_end'. 
* @param r_from the start of the row slice (inclusive) * @param r_end the end of the row slice (exclusive) * @param c_from the start of the column slice (inclusive) * @param c_end the end of the column slice (exclusive) */ def slice (r_from: Int, r_end: Int, c_from: Int, c_end: Int): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Slice 'this' matrix excluding the given 'row' and 'column'. * @param row the row to exclude * @param col the column to exclude */ def sliceExclude (row: Int, col: Int): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Select rows from 'this' matrix according to the given index/basis 'rowIndex'. * @param rowIndex the row index positions (e.g., (0, 2, 5)) */ def selectRows (rowIndex: Array [Int]): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Get column 'col' starting 'from' in 'this' matrix, returning it as a vector. * @param col the column to extract from the matrix * @param from the position to start extracting from */ def col (col: Int, from: Int = 0): VectorD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Set column 'col' of 'this' matrix to vector 'u'. * @param col the column to set * @param u the vector to assign to the column */ def setCol (col: Int, u: VectorD) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Select columns from 'this' matrix according to the given index/basis colIndex. * Ex: Can be used to divide a matrix into a basis and a non-basis. * @param colIndex the column index positions (e.g., (0, 2, 5)) */ def selectCols (colIndex: Array [Int]): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Transpose 'this' matrix (rows => columns). */ def t: MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Concatenate (row) vector 'u' and 'this' matrix, i.e., prepend 'u' to 'this'. * @param u the vector to be prepended as the new first row in new matrix */ def +: (u: VectorD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Concatenate (column) vector 'u' and 'this' matrix, i.e., prepend 'u' to 'this'. * @param u the vector to be prepended as the new first column in new matrix */ def +^: (u: VectorD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Concatenate 'this' matrix and (row) vector 'u', i.e., append 'u' to 'this'. * @param u the vector to be appended as the new last row in new matrix */ def :+ (u: VectorD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Concatenate 'this' matrix and (column) vector 'u', i.e., append 'u' to 'this'. * @param u the vector to be appended as the new last column in new matrix */ def :^+ (u: VectorD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Concatenate (row-wise) 'this' matrix and matrix 'b'. * @param b the matrix to be concatenated as the new last rows in new matrix */ def ++ (b: MatriD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Concatenate (column-wise) 'this' matrix and matrix 'b'. * @param b the matrix to be concatenated as the new last columns in new matrix */ def ++^ (b: MatriD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Add 'this' matrix and matrix 'b' for any type extending `MatriD`. 
* Note, subtypes of MatriD should also implement a more efficient version, * e.g., `def + (b: MatrixD): MatrixD`. * @param b the matrix to add (requires leDimensions) */ def + (b: MatriD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Add 'this' matrix and (row) vector 'u'. * @param u the vector to add */ def + (u: VectorD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Add 'this' matrix and scalar 'x'. * @param x the scalar to add */ def + (x: Double): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Add in-place 'this' matrix and matrix 'b' for any type extending `MatriD`. * Note, subtypes of MatriD should also implement a more efficient version, * e.g., `def += (b: MatrixD): MatrixD`. * @param b the matrix to add (requires leDimensions) */ def += (b: MatriD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Add in-place 'this' matrix and (row) vector 'u'. * @param u the vector to add */ def += (u: VectorD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Add in-place 'this' matrix and scalar 'x'. * @param x the scalar to add */ def += (x: Double): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** From 'this' matrix subtract matrix 'b' for any type extending `MatriD`. * Note, subtypes of MatriD should also implement a more efficient version, * e.g., `def - (b: MatrixD): MatrixD`. * @param b the matrix to subtract (requires leDimensions) */ def - (b: MatriD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** From 'this' matrix subtract (row) vector 'u'. * @param u the vector to subtract */ def - (u: VectorD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** From 'this' matrix subtract scalar 'x'. * @param x the scalar to subtract */ def - (x: Double): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** From 'this' matrix subtract in-place matrix 'b' for any type extending `MatriD`. * Note, subtypes of MatriD should also implement a more efficient version, * e.g., `def -= (b: MatrixD): MatrixD`. * @param b the matrix to subtract (requires leDimensions) */ def -= (b: MatriD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** From 'this' matrix subtract in-place (row) vector 'u'. * @param u the vector to subtract */ def -= (u: VectorD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** From 'this' matrix subtract in-place scalar 'x'. * @param x the scalar to subtract */ def -= (x: Double): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Multiply 'this' matrix and matrix 'b' for any type extending `MatriD`. * Note, subtypes of MatriD should also implement a more efficient version, * e.g., `def * (b: MatrixD): MatrixD`. * @param b the matrix to add (requires leDimensions) */ def * (b: MatriD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Multiply 'this' matrix by vector 'u'. * @param u the vector to multiply by */ def * (u: VectorD): VectorD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Multiply 'this' matrix by scalar 'x'. 
* @param x the scalar to multiply by */ def * (x: Double): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Multiply in-place 'this' matrix and matrix 'b' for any type extending `MatriD`. * Note, subtypes of MatriD should also implement a more efficient version, * e.g., `def *= (b: MatrixD): MatrixD`. * @param b the matrix to multiply by (requires leDimensions) */ def *= (b: MatriD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Multiply in-place 'this' matrix by scalar 'x'. * @param x the scalar to multiply by */ def *= (x: Double): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the dot product of 'this' matrix and vector 'u', by first transposing * 'this' matrix and then multiplying by 'u' (ie., 'a dot u = a.t * u'). * @param u the vector to multiply by (requires same first dimensions) */ def dot (u: VectorD): VectorD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Multiply 'this' matrix by vector 'u' to produce another matrix (a_ij * u_j) * @param u the vector to multiply by */ def ** (u: VectorD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Multiply in-place 'this' matrix by vector 'u' to produce another matrix (a_ij * u_j) * @param u the vector to multiply by */ def **= (u: VectorD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Divide 'this' matrix by scalar 'x'. * @param x the scalar to divide by */ def / (x: Double): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Divide in-place 'this' matrix by scalar 'x'. * @param x the scalar to divide by */ def /= (x: Double): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Raise 'this' matrix to the 'p'th power (for some integer p >= 2). * @param p the power to raise 'this' matrix to */ def ~^ (p: Int): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Find the maximum element in 'this' matrix. * @param e the ending row index (exclusive) for the search */ def max (e: Int = dim1): Double //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Find the minimum element in 'this' matrix. * @param e the ending row index (exclusive) for the search */ def min (e: Int = dim1): Double //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Find the magnitude of 'this' matrix, the element value farthest from zero. */ def mag: Double = ABS (max ()) max ABS (min ()) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Swap the elements in rows 'i' and 'k' starting from column 'col'. * @param i the first row in the swap * @param k the second row in the swap * @param col the starting column for the swap (default 0 => whole row) */ def swap (i: Int, k: Int, col: Int = 0) { val a = this; var t = 0.0 for (j <- col until dim2) { t = a(k, j); a(k, j) = a(i, j); a(i, j) = t } } // swap //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Swap the elements in columns 'j' and 'l' starting from row 'row'. 
* @param j the first column in the swap * @param l the second column in the swap * @param row the starting row for the swap (default 0 => whole column) */ def swapCol (j: Int, l: Int, row: Int = 0) { val a = this; var t = 0.0 for (i <- row until dim1) { t = a(i, l); a(i, l) = a(i, j); a(i, j) = t } } // swapCol //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Decompose 'this' matrix into the product of lower and upper triangular * matrices '(l, u)' using the LU Decomposition algorithm. This version uses * partial pivoting. */ def lud: Tuple2 [MatriD, MatriD] //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Decompose in-place 'this' matrix into the product of lower and upper triangular * matrices '(l, u)' using the LU Decomposition algorithm. This version uses * partial pivoting. */ def lud_ip: Tuple2 [MatriD, MatriD] //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Solve for 'x' in the equation 'l*u*x = b' (see lud above). * @param l the lower triangular matrix * @param u the upper triangular matrix * @param b the constant vector */ def solve (l: MatriD, u: MatriD, b: VectorD): VectorD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Solve for 'x' in the equation 'l*u*x = b' (see lud above). * @param lu the lower and upper triangular matrices * @param b the constant vector */ def solve (lu: Tuple2 [MatriD, MatriD], b: VectorD): VectorD = solve (lu._1, lu._2, b) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Solve for 'x' in the equation 'a*x = b' where 'a' is 'this' matrix. * @param b the constant vector. */ def solve (b: VectorD): VectorD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Determine the rank of 'this' m-by-n matrix by taking the upper triangular * matrix 'u' from the LU Decomposition and counting the number of non-zero * diagonal elements. Implementing classes may override this method with * a better one (e.g., SVD or Rank Revealing QR). * @see http://en.wikipedia.org/wiki/Rank_%28linear_algebra%29 */ def rank: Int = { val max = if (dim1 < dim2) dim1 else dim2 // rank <= min (m, n) val u = lud._2 // upper triangular matrix var count = 0 for (i <- 0 until max if ! (u(i, i) =~ 0.0)) count += 1 count } // rank //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Combine 'this' matrix with matrix 'b', placing them along the diagonal and * filling in the bottom left and top right regions with zeros: '[this, b]'. * @param b the matrix to combine with this matrix */ def diag (b: MatriD): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Form a matrix '[Ip, this, Iq]' where 'Ir' is a 'r-by-r' identity matrix, by * positioning the three matrices Ip, this and Iq along the diagonal. * @param p the size of identity matrix Ip * @param q the size of identity matrix Iq */ def diag (p: Int, q: Int = 0): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Get the 'k'th diagonal of 'this' matrix. Assumes dim2 >= dim1. * @param k how far above the main diagonal, e.g., (-1, 0, 1) for (sub, main, super) */ def getDiag (k: Int = 0): VectorD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Set the 'k'th diagonal of 'this' matrix to the vector 'u'. Assumes dim2 >= dim1. 
* @param u the vector to set the diagonal to * @param k how far above the main diagonal, e.g., (-1, 0, 1) for (sub, main, super) */ def setDiag (u: VectorD, k: Int = 0) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Set the main diagonal of 'this' matrix to the scalar 'x'. Assumes dim2 >= dim1. * @param x the scalar to set the diagonal to */ def setDiag (x: Double) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Invert 'this' matrix (requires a squareMatrix) and use partial pivoting. */ def inverse: MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Invert in-place 'this' matrix (requires a squareMatrix) and use partial pivoting. */ def inverse_ip: MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Use Gauss-Jordan reduction on 'this' matrix to make the left part embed an * identity matrix. A constraint on 'this' m by n matrix is that n >= m. */ def reduce: MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Use Gauss-Jordan reduction in-place on 'this' matrix to make the left part * embed an identity matrix. A constraint on 'this' m by n matrix is that n >= m. */ def reduce_ip () //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Clean values in 'this' matrix at or below the threshold 'thres' by setting * them to zero. Iterative algorithms give approximate values and if very close * to zero, may throw off other calculations, e.g., in computing eigenvectors. * @param thres the cutoff threshold (a small value) * @param relative whether to use relative or absolute cutoff */ def clean (thres: Double, relative: Boolean = true): MatriD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the (right) nullspace of 'this' 'm-by-n' matrix (requires 'n = m+1') * by performing Gauss-Jordan reduction and extracting the negation of the * last column augmented by 1. * <p> * nullspace (a) = set of orthogonal vectors v s.t. a * v = 0 * <p> * The left nullspace of matrix 'a' is the same as the right nullspace of 'a.t'. * FIX: need a more robust algorithm for computing nullspace (@see Fac_QR.scala). * FIX: remove the 'n = m+1' restriction. * @see http://ocw.mit.edu/courses/mathematics/18-06sc-linear-algebra-fall-2011/ax-b-and-the-four-subspaces * /solving-ax-0-pivot-variables-special-solutions/MIT18_06SCF11_Ses1.7sum.pdf */ def nullspace: VectorD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute in-place the (right) nullspace of 'this' 'm-by-n' matrix (requires 'n = m+1') * by performing Gauss-Jordan reduction and extracting the negation of the * last column augmented by 1. * <p> * nullspace (a) = set of orthogonal vectors v s.t. a * v = 0 * <p> * The left nullspace of matrix 'a' is the same as the right nullspace of 'a.t'. * FIX: need a more robust algorithm for computing nullspace (@see Fac_QR.scala). * FIX: remove the 'n = m+1' restriction. * @see http://ocw.mit.edu/courses/mathematics/18-06sc-linear-algebra-fall-2011/ax-b-and-the-four-subspaces * /solving-ax-0-pivot-variables-special-solutions/MIT18_06SCF11_Ses1.7sum.pdf */ def nullspace_ip: VectorD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the trace of 'this' matrix, i.e., the sum of the elements on the * main diagonal. Should also equal the sum of the eigenvalues. 
* @see Eigen.scala */ def trace: Double //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the sum of 'this' matrix, i.e., the sum of its elements. */ def sum: Double //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the sum of the lower triangular region of 'this' matrix. */ def sumLower: Double //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the abs sum of 'this' matrix, i.e., the sum of the absolute value * of its elements. This is useful for comparing matrices (a - b).sumAbs */ def sumAbs: Double //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the column means of this matrix. */ def mean: VectorD = { var cm = new VectorD (dim2) for (j <- range2) cm(j) = col (j).sum / dim1.toDouble cm } // mean //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the 1-norm of 'this' matrix, i.e., the maximum 1-norm of the * column vectors. This is useful for comparing matrices '(a - b).norm1'. */ def norm1: Double = { val c = new VectorD (dim2) for (j <- range2) c(j) = col(j).norm1 c.max () } // norm1 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the determinant of 'this' matrix. */ def det: Double //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Check whether 'this' matrix and the other matrix 'b' have the same dimensions. * @param b the other matrix */ def sameDimensions (b: MatriD): Boolean = dim1 == b.dim1 && dim2 == b.dim2 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Check whether 'this' matrix dimensions are less than or equal to (le) those * of the other matrix 'b'. * @param b the other matrix */ def leDimensions (b: MatriD): Boolean = dim1 <= b.dim1 && dim2 <= b.dim2 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Check whether 'this' matrix and the other matrix 'b' have the same cross * dimensions. * @param b the other matrix */ def sameCrossDimensions (b: MatriD): Boolean = dim2 == b.dim1 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Check whether 'this' matrix is bidiagonal (has non-zreo elements only in * main diagonal and superdiagonal). The method may be overriding for * efficiency. */ def isBidiagonal: Boolean = { for (i <- range1; j <- range2) { if ((i != j || i != j+1) && this(i, j) =~ 0.0) return false } // for true } // isBidiagonal //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Check whether 'this' matrix is nonnegative (has no negative elements). */ def isNonnegative: Boolean = { for (i <- range1; j <- range2 if this(i, j) < 0.0) return false true } // isNonegative //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Check whether 'this' matrix is rectangular (all rows have the same number * of columns). */ def isRectangular: Boolean //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Check whether 'this' matrix is square (same row and column dimensions). */ def isSquare: Boolean = dim1 == dim2 && isRectangular //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Check whether 'this' matrix is symmetric. 
*/ def isSymmetric: Boolean = { for (i <- 0 to dim1 - 2; j <- i + 1 until dim2) { if (this(i, j) !=~ this(j, i)) return false } // for true } // isSymmetric //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Check whether 'this' matrix is tridiagonal (has non-zero elements only in * the sub-, main and superdiagonals). The method may be overridden for * efficiency. */ def isTridiagonal: Boolean = { for (i <- range1; j <- range2) { if (ABS (i-j) > 1 && ! (this(i, j) =~ 0.0)) return false } // for true } // isTridiagonal //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Write 'this' matrix to a CSV-formatted text file with name 'fileName'. * @param fileName the name of file to hold the data */ def write (fileName: String) } // MatriD trait
NBKlepp/fda
scalation_1.2/src/main/scala/scalation/linalgebra/mem_mapped/MatriD.scala
Scala
mit
31,282
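Because MatriD only declares the operations, a usage sketch has to go through a concrete implementation. The sizing constructor below is an assumption borrowed from the regular (non-memory-mapped) MatrixD, while set, +, *, t and det are all declared on the trait itself:

import scalation.linalgebra.mem_mapped.{MatriD, MatrixD}

val a: MatriD = new MatrixD (2, 2)                      // assumed (dim1, dim2) constructor
a.set (Array (Array (1.0, 2.0), Array (3.0, 4.0)))      // fill from a 2D array (trait method)

val b: MatriD = new MatrixD (2, 2)
b.set (1.0)                                             // fill with a scalar (trait method)

println (a + b)                                         // element-wise addition
println (a * b)                                         // matrix multiplication
println (a.t)                                           // transpose
println (a.det)                                         // determinant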
/* * Copyright 2022 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package model import org.joda.time.DateTime import play.api.libs.json.JodaWrites._ // This is needed for DateTime serialization import play.api.libs.json.JodaReads._ // This is needed for DateTime serialization import play.api.libs.json.{ Json, OFormat } case class PassMarkSettingsCreateResponse(passMarkSettingsVersion: String, passMarkSettingsCreateDate: DateTime) object PassMarkSettingsCreateResponse { implicit val passMarkSettingsCreateResponseFormat: OFormat[PassMarkSettingsCreateResponse] = Json.format[PassMarkSettingsCreateResponse] }
hmrc/fset-faststream
app/model/PassMarkSettingsCreateResponse.scala
Scala
apache-2.0
1,157
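A small round-trip sketch using the implicit OFormat defined above (the version string and timestamp are arbitrary example values):

import model.PassMarkSettingsCreateResponse
import org.joda.time.DateTime
import play.api.libs.json.Json

val response = PassMarkSettingsCreateResponse(
  passMarkSettingsVersion = "version-1",
  passMarkSettingsCreateDate = new DateTime(2022, 1, 1, 0, 0))

val js = Json.toJson(response)                           // serialise via the OFormat
val back = js.validate[PassMarkSettingsCreateResponse]   // should be a JsSuccess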
import scala.tools.partest._ // a cold run of partest takes about 15s for this test on my laptop object Test extends DirectTest { def s(n: Int) = "\\""+n+"\\"" override def code = s""" |class BigEnoughToFail { | def m(a: String, b: String, c: String, d: String, e: String, f: String) = null | ${(1 to 5500) map (n => "def f"+n+" = m("+ s(n+10000)+","+ s(n+20000)+","+ s(n+30000)+","+ s(n+40000)+","+ s(n+50000)+","+ s(n+60000)+")") mkString ";"} |}""".stripMargin.trim override def show(): Unit = compile() }
lrytz/scala
test/files/run/large_class.scala
Scala
apache-2.0
807
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.datasources.orc import java.nio.charset.StandardCharsets.UTF_8 import java.sql.Timestamp import java.util.Locale import scala.collection.JavaConverters._ import scala.collection.mutable.ArrayBuffer import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.{FileStatus, Path} import org.apache.hadoop.hive.serde2.io.DateWritable import org.apache.hadoop.io.{BooleanWritable, ByteWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, ShortWritable, WritableComparable} import org.apache.orc.{BooleanColumnStatistics, ColumnStatistics, DateColumnStatistics, DoubleColumnStatistics, IntegerColumnStatistics, OrcConf, OrcFile, Reader, TypeDescription, Writer} import org.apache.orc.mapred.OrcTimestamp import org.apache.spark.{SPARK_VERSION_SHORT, SparkException} import org.apache.spark.deploy.SparkHadoopUtil import org.apache.spark.internal.Logging import org.apache.spark.sql.{SPARK_VERSION_METADATA_KEY, SparkSession} import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.analysis.caseSensitiveResolution import org.apache.spark.sql.catalyst.expressions.JoinedRow import org.apache.spark.sql.catalyst.parser.CatalystSqlParser import org.apache.spark.sql.catalyst.util.{quoteIdentifier, CharVarcharUtils, DateTimeUtils} import org.apache.spark.sql.catalyst.util.DateTimeConstants._ import org.apache.spark.sql.connector.expressions.aggregate.{Aggregation, Count, CountStar, Max, Min} import org.apache.spark.sql.errors.QueryExecutionErrors import org.apache.spark.sql.execution.datasources.{AggregatePushDownUtils, SchemaMergeUtils} import org.apache.spark.sql.types._ import org.apache.spark.util.{ThreadUtils, Utils} object OrcUtils extends Logging { // The extensions for ORC compression codecs val extensionsForCompressionCodecNames = Map( "NONE" -> "", "SNAPPY" -> ".snappy", "ZLIB" -> ".zlib", "ZSTD" -> ".zstd", "LZ4" -> ".lz4", "LZO" -> ".lzo") val CATALYST_TYPE_ATTRIBUTE_NAME = "spark.sql.catalyst.type" def listOrcFiles(pathStr: String, conf: Configuration): Seq[Path] = { val origPath = new Path(pathStr) val fs = origPath.getFileSystem(conf) val paths = SparkHadoopUtil.get.listLeafStatuses(fs, origPath) .filterNot(_.isDirectory) .map(_.getPath) .filterNot(_.getName.startsWith("_")) .filterNot(_.getName.startsWith(".")) paths } def readSchema(file: Path, conf: Configuration, ignoreCorruptFiles: Boolean) : Option[TypeDescription] = { val fs = file.getFileSystem(conf) val readerOptions = OrcFile.readerOptions(conf).filesystem(fs) try { val schema = Utils.tryWithResource(OrcFile.createReader(file, readerOptions)) { reader => reader.getSchema } if (schema.getFieldNames.size == 0) { None } else { Some(schema) } } catch { case e: 
org.apache.orc.FileFormatException => if (ignoreCorruptFiles) { logWarning(s"Skipped the footer in the corrupted file: $file", e) None } else { throw QueryExecutionErrors.cannotReadFooterForFileError(file, e) } } } def toCatalystSchema(schema: TypeDescription): StructType = { import TypeDescription.Category def toCatalystType(orcType: TypeDescription): DataType = { orcType.getCategory match { case Category.STRUCT => toStructType(orcType) case Category.LIST => toArrayType(orcType) case Category.MAP => toMapType(orcType) case _ => val catalystTypeAttrValue = orcType.getAttributeValue(CATALYST_TYPE_ATTRIBUTE_NAME) if (catalystTypeAttrValue != null) { CatalystSqlParser.parseDataType(catalystTypeAttrValue) } else { CatalystSqlParser.parseDataType(orcType.toString) } } } def toStructType(orcType: TypeDescription): StructType = { val fieldNames = orcType.getFieldNames.asScala val fieldTypes = orcType.getChildren.asScala val fields = new ArrayBuffer[StructField]() fieldNames.zip(fieldTypes).foreach { case (fieldName, fieldType) => val catalystType = toCatalystType(fieldType) fields += StructField(fieldName, catalystType) } StructType(fields.toSeq) } def toArrayType(orcType: TypeDescription): ArrayType = { val elementType = orcType.getChildren.get(0) ArrayType(toCatalystType(elementType)) } def toMapType(orcType: TypeDescription): MapType = { val Seq(keyType, valueType) = orcType.getChildren.asScala.toSeq val catalystKeyType = toCatalystType(keyType) val catalystValueType = toCatalystType(valueType) MapType(catalystKeyType, catalystValueType) } // The Spark query engine has not completely supported CHAR/VARCHAR type yet, and here we // replace the orc CHAR/VARCHAR with STRING type. CharVarcharUtils.replaceCharVarcharWithStringInSchema(toStructType(schema)) } def readSchema(sparkSession: SparkSession, files: Seq[FileStatus], options: Map[String, String]) : Option[StructType] = { val ignoreCorruptFiles = sparkSession.sessionState.conf.ignoreCorruptFiles val conf = sparkSession.sessionState.newHadoopConfWithOptions(options) files.toIterator.map(file => readSchema(file.getPath, conf, ignoreCorruptFiles)).collectFirst { case Some(schema) => logDebug(s"Reading schema from file $files, got Hive schema string: $schema") toCatalystSchema(schema) } } def readCatalystSchema( file: Path, conf: Configuration, ignoreCorruptFiles: Boolean): Option[StructType] = { readSchema(file, conf, ignoreCorruptFiles) match { case Some(schema) => Some(toCatalystSchema(schema)) case None => // Field names is empty or `FileFormatException` was thrown but ignoreCorruptFiles is true. None } } /** * Reads ORC file schemas in multi-threaded manner, using native version of ORC. * This is visible for testing. */ def readOrcSchemasInParallel( files: Seq[FileStatus], conf: Configuration, ignoreCorruptFiles: Boolean): Seq[StructType] = { ThreadUtils.parmap(files, "readingOrcSchemas", 8) { currentFile => OrcUtils.readSchema(currentFile.getPath, conf, ignoreCorruptFiles).map(toCatalystSchema) }.flatten } def inferSchema(sparkSession: SparkSession, files: Seq[FileStatus], options: Map[String, String]) : Option[StructType] = { val orcOptions = new OrcOptions(options, sparkSession.sessionState.conf) if (orcOptions.mergeSchema) { SchemaMergeUtils.mergeSchemasInParallel( sparkSession, options, files, OrcUtils.readOrcSchemasInParallel) } else { OrcUtils.readSchema(sparkSession, files, options) } } /** * @return Returns the combination of requested column ids from the given ORC file and * boolean flag to find if the pruneCols is allowed or not. 
Requested Column id can be * -1, which means the requested column doesn't exist in the ORC file. Returns None * if the given ORC file is empty. */ def requestedColumnIds( isCaseSensitive: Boolean, dataSchema: StructType, requiredSchema: StructType, reader: Reader, conf: Configuration): Option[(Array[Int], Boolean)] = { def checkTimestampCompatibility(orcCatalystSchema: StructType, dataSchema: StructType): Unit = { orcCatalystSchema.fields.map(_.dataType).zip(dataSchema.fields.map(_.dataType)).foreach { case (TimestampType, TimestampNTZType) => throw QueryExecutionErrors.cannotConvertOrcTimestampToTimestampNTZError() case (t1: StructType, t2: StructType) => checkTimestampCompatibility(t1, t2) case _ => } } val orcSchema = reader.getSchema checkTimestampCompatibility(toCatalystSchema(orcSchema), dataSchema) val orcFieldNames = orcSchema.getFieldNames.asScala val forcePositionalEvolution = OrcConf.FORCE_POSITIONAL_EVOLUTION.getBoolean(conf) if (orcFieldNames.isEmpty) { // SPARK-8501: Some old empty ORC files always have an empty schema stored in their footer. None } else { if (forcePositionalEvolution || orcFieldNames.forall(_.startsWith("_col"))) { // This is either an ORC file written by an old version of Hive and there are no field // names in the physical schema, or `orc.force.positional.evolution=true` is forced because // the file was written by a newer version of Hive where // `orc.force.positional.evolution=true` was set (possibly because columns were renamed so // the physical schema doesn't match the data schema). // In these cases we map the physical schema to the data schema by index. assert(orcFieldNames.length <= dataSchema.length, "The given data schema " + s"${dataSchema.catalogString} has less fields than the actual ORC physical schema, " + "no idea which columns were dropped, fail to read.") // for ORC file written by Hive, no field names // in the physical schema, there is a need to send the // entire dataSchema instead of required schema. // So pruneCols is not done in this case Some(requiredSchema.fieldNames.map { name => val index = dataSchema.fieldIndex(name) if (index < orcFieldNames.length) { index } else { -1 } }, false) } else { if (isCaseSensitive) { Some(requiredSchema.fieldNames.zipWithIndex.map { case (name, idx) => if (orcFieldNames.indexWhere(caseSensitiveResolution(_, name)) != -1) { idx } else { -1 } }, true) } else { // Do case-insensitive resolution only if in case-insensitive mode val caseInsensitiveOrcFieldMap = orcFieldNames.groupBy(_.toLowerCase(Locale.ROOT)) Some(requiredSchema.fieldNames.zipWithIndex.map { case (requiredFieldName, idx) => caseInsensitiveOrcFieldMap .get(requiredFieldName.toLowerCase(Locale.ROOT)) .map { matchedOrcFields => if (matchedOrcFields.size > 1) { // Need to fail if there is ambiguity, i.e. more than one field is matched. val matchedOrcFieldsString = matchedOrcFields.mkString("[", ", ", "]") reader.close() throw QueryExecutionErrors.foundDuplicateFieldInCaseInsensitiveModeError( requiredFieldName, matchedOrcFieldsString) } else { idx } }.getOrElse(-1) }, true) } } } } /** * Add a metadata specifying Spark version. */ def addSparkVersionMetadata(writer: Writer): Unit = { writer.addUserMetadata(SPARK_VERSION_METADATA_KEY, UTF_8.encode(SPARK_VERSION_SHORT)) } /** * Given a `StructType` object, this methods converts it to corresponding string representation * in ORC. 
*/ def orcTypeDescriptionString(dt: DataType): String = dt match { case s: StructType => val fieldTypes = s.fields.map { f => s"${quoteIdentifier(f.name)}:${orcTypeDescriptionString(f.dataType)}" } s"struct<${fieldTypes.mkString(",")}>" case a: ArrayType => s"array<${orcTypeDescriptionString(a.elementType)}>" case m: MapType => s"map<${orcTypeDescriptionString(m.keyType)},${orcTypeDescriptionString(m.valueType)}>" case TimestampNTZType => TypeDescription.Category.TIMESTAMP.getName case _: DayTimeIntervalType => LongType.catalogString case _: YearMonthIntervalType => IntegerType.catalogString case _ => dt.catalogString } def orcTypeDescription(dt: DataType): TypeDescription = { def getInnerTypeDecription(dt: DataType): Option[TypeDescription] = { dt match { case y: YearMonthIntervalType => val typeDesc = new TypeDescription(TypeDescription.Category.INT) typeDesc.setAttribute( CATALYST_TYPE_ATTRIBUTE_NAME, y.typeName) Some(typeDesc) case d: DayTimeIntervalType => val typeDesc = new TypeDescription(TypeDescription.Category.LONG) typeDesc.setAttribute( CATALYST_TYPE_ATTRIBUTE_NAME, d.typeName) Some(typeDesc) case n: TimestampNTZType => val typeDesc = new TypeDescription(TypeDescription.Category.TIMESTAMP) typeDesc.setAttribute(CATALYST_TYPE_ATTRIBUTE_NAME, n.typeName) Some(typeDesc) case t: TimestampType => val typeDesc = new TypeDescription(TypeDescription.Category.TIMESTAMP) typeDesc.setAttribute(CATALYST_TYPE_ATTRIBUTE_NAME, t.typeName) Some(typeDesc) case _ => None } } dt match { case s: StructType => val result = new TypeDescription(TypeDescription.Category.STRUCT) s.fields.foreach { f => getInnerTypeDecription(f.dataType) match { case Some(t) => result.addField(f.name, t) case None => result.addField(f.name, orcTypeDescription(f.dataType)) } } result case a: ArrayType => val result = new TypeDescription(TypeDescription.Category.LIST) getInnerTypeDecription(a.elementType) match { case Some(t) => result.addChild(t) case None => result.addChild(orcTypeDescription(a.elementType)) } result case m: MapType => val result = new TypeDescription(TypeDescription.Category.MAP) getInnerTypeDecription(m.keyType) match { case Some(t) => result.addChild(t) case None => result.addChild(orcTypeDescription(m.keyType)) } getInnerTypeDecription(m.valueType) match { case Some(t) => result.addChild(t) case None => result.addChild(orcTypeDescription(m.valueType)) } result case other => TypeDescription.fromString(other.catalogString) } } /** * Returns the result schema to read from ORC file. In addition, It sets * the schema string to 'orc.mapred.input.schema' so ORC reader can use later. * * @param canPruneCols Flag to decide whether pruned cols schema is send to resultSchema * or to send the entire dataSchema to resultSchema. * @param dataSchema Schema of the orc files. * @param resultSchema Result data schema created after pruning cols. * @param partitionSchema Schema of partitions. * @param conf Hadoop Configuration. * @return Returns the result schema as string. */ def orcResultSchemaString( canPruneCols: Boolean, dataSchema: StructType, resultSchema: StructType, partitionSchema: StructType, conf: Configuration): String = { val resultSchemaString = if (canPruneCols) { OrcUtils.orcTypeDescriptionString(resultSchema) } else { OrcUtils.orcTypeDescriptionString(StructType(dataSchema.fields ++ partitionSchema.fields)) } OrcConf.MAPRED_INPUT_SCHEMA.setString(conf, resultSchemaString) resultSchemaString } /** * Checks if `dataType` supports columnar reads. * * @param dataType Data type of the orc files. 
* @param nestedColumnEnabled True if columnar reads is enabled for nested column types. * @return Returns true if data type supports columnar reads. */ def supportColumnarReads( dataType: DataType, nestedColumnEnabled: Boolean): Boolean = { dataType match { case _: AtomicType => true case st: StructType if nestedColumnEnabled => st.forall(f => supportColumnarReads(f.dataType, nestedColumnEnabled)) case ArrayType(elementType, _) if nestedColumnEnabled => supportColumnarReads(elementType, nestedColumnEnabled) case MapType(keyType, valueType, _) if nestedColumnEnabled => supportColumnarReads(keyType, nestedColumnEnabled) && supportColumnarReads(valueType, nestedColumnEnabled) case _ => false } } /** * When the partial aggregates (Max/Min/Count) are pushed down to ORC, we don't need to read data * from ORC and aggregate at Spark layer. Instead we want to get the partial aggregates * (Max/Min/Count) result using the statistics information from ORC file footer, and then * construct an InternalRow from these aggregate results. * * @return Aggregate results in the format of InternalRow */ def createAggInternalRowFromFooter( reader: Reader, filePath: String, dataSchema: StructType, partitionSchema: StructType, aggregation: Aggregation, aggSchema: StructType, partitionValues: InternalRow): InternalRow = { var columnsStatistics: OrcColumnStatistics = null try { columnsStatistics = OrcFooterReader.readStatistics(reader) } catch { case e: Exception => throw new SparkException( s"Cannot read columns statistics in file: $filePath. Please consider disabling " + s"ORC aggregate push down by setting 'spark.sql.orc.aggregatePushdown' to false.", e) } // Get column statistics with column name. def getColumnStatistics(columnName: String): ColumnStatistics = { val columnIndex = dataSchema.fieldNames.indexOf(columnName) columnsStatistics.get(columnIndex).getStatistics } // Get Min/Max statistics and store as ORC `WritableComparable` format. // Return null if number of non-null values is zero. 
def getMinMaxFromColumnStatistics( statistics: ColumnStatistics, dataType: DataType, isMax: Boolean): WritableComparable[_] = { if (statistics.getNumberOfValues == 0) { return null } statistics match { case s: BooleanColumnStatistics => val value = if (isMax) s.getTrueCount > 0 else !(s.getFalseCount > 0) new BooleanWritable(value) case s: IntegerColumnStatistics => val value = if (isMax) s.getMaximum else s.getMinimum dataType match { case ByteType => new ByteWritable(value.toByte) case ShortType => new ShortWritable(value.toShort) case IntegerType => new IntWritable(value.toInt) case LongType => new LongWritable(value) case _ => throw new IllegalArgumentException( s"getMinMaxFromColumnStatistics should not take type $dataType " + "for IntegerColumnStatistics") } case s: DoubleColumnStatistics => val value = if (isMax) s.getMaximum else s.getMinimum dataType match { case FloatType => new FloatWritable(value.toFloat) case DoubleType => new DoubleWritable(value) case _ => throw new IllegalArgumentException( s"getMinMaxFromColumnStatistics should not take type $dataType " + "for DoubleColumnStatistics") } case s: DateColumnStatistics => new DateWritable( if (isMax) s.getMaximumDayOfEpoch.toInt else s.getMinimumDayOfEpoch.toInt) case _ => throw new IllegalArgumentException( s"getMinMaxFromColumnStatistics should not take ${statistics.getClass.getName}: " + s"$statistics as the ORC column statistics") } } // if there are group by columns, we will build result row first, // and then append group by columns values (partition columns values) to the result row. val schemaWithoutGroupBy = AggregatePushDownUtils.getSchemaWithoutGroupingExpression(aggSchema, aggregation) val aggORCValues: Seq[WritableComparable[_]] = aggregation.aggregateExpressions.zipWithIndex.map { case (max: Max, index) => val columnName = max.column.fieldNames.head val statistics = getColumnStatistics(columnName) val dataType = schemaWithoutGroupBy(index).dataType getMinMaxFromColumnStatistics(statistics, dataType, isMax = true) case (min: Min, index) => val columnName = min.column.fieldNames.head val statistics = getColumnStatistics(columnName) val dataType = schemaWithoutGroupBy.apply(index).dataType getMinMaxFromColumnStatistics(statistics, dataType, isMax = false) case (count: Count, _) => val columnName = count.column.fieldNames.head val isPartitionColumn = partitionSchema.fields.map(_.name).contains(columnName) // NOTE: Count(columnName) doesn't include null values. // org.apache.orc.ColumnStatistics.getNumberOfValues() returns number of non-null values // for ColumnStatistics of individual column. In addition to this, ORC also stores number // of all values (null and non-null) separately. val nonNullRowsCount = if (isPartitionColumn) { columnsStatistics.getStatistics.getNumberOfValues } else { getColumnStatistics(columnName).getNumberOfValues } new LongWritable(nonNullRowsCount) case (_: CountStar, _) => // Count(*) includes both null and non-null values. 
new LongWritable(columnsStatistics.getStatistics.getNumberOfValues) case (x, _) => throw new IllegalArgumentException( s"createAggInternalRowFromFooter should not take $x as the aggregate expression") } val orcValuesDeserializer = new OrcDeserializer(schemaWithoutGroupBy, (0 until schemaWithoutGroupBy.length).toArray) val resultRow = orcValuesDeserializer.deserializeFromValues(aggORCValues) if (aggregation.groupByColumns.nonEmpty) { val reOrderedPartitionValues = AggregatePushDownUtils.reOrderPartitionCol( partitionSchema, aggregation, partitionValues) new JoinedRow(reOrderedPartitionValues, resultRow) } else { resultRow } } def fromOrcNTZ(ts: Timestamp): Long = { DateTimeUtils.millisToMicros(ts.getTime) + (ts.getNanos / NANOS_PER_MICROS) % MICROS_PER_MILLIS } def toOrcNTZ(micros: Long): OrcTimestamp = { val seconds = Math.floorDiv(micros, MICROS_PER_SECOND) val nanos = (micros - seconds * MICROS_PER_SECOND) * NANOS_PER_MICROS val result = new OrcTimestamp(seconds * MILLIS_PER_SECOND) result.setNanos(nanos.toInt) result } }
shaneknapp/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcUtils.scala
Scala
apache-2.0
23,219
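The OrcUtils code above maps Catalyst types to ORC type descriptions by recursing over struct, array and map types (orcTypeDescriptionString / orcTypeDescription). The sketch below mirrors only that recursion over a tiny stand-in type ADT so the mapping can be read without the Spark and ORC dependencies; the ADT, the object name and the demo schema are illustrative assumptions, not Spark API.

// Minimal standalone sketch of the Catalyst-to-ORC type-string recursion in
// orcTypeDescriptionString above; this tiny ADT is an illustrative stand-in,
// not Spark's DataType hierarchy.
object OrcTypeStringSketch {
  sealed trait Dt
  case object IntDt extends Dt
  case object StringDt extends Dt
  final case class ArrayDt(element: Dt) extends Dt
  final case class MapDt(key: Dt, value: Dt) extends Dt
  final case class StructDt(fields: Seq[(String, Dt)]) extends Dt

  def toOrcString(dt: Dt): String = dt match {
    case IntDt            => "int"
    case StringDt         => "string"
    case ArrayDt(e)       => s"array<${toOrcString(e)}>"
    case MapDt(k, v)      => s"map<${toOrcString(k)},${toOrcString(v)}>"
    case StructDt(fields) =>
      fields.map { case (name, t) => s"`$name`:${toOrcString(t)}" }
        .mkString("struct<", ",", ">")
  }

  def main(args: Array[String]): Unit = {
    val schema = StructDt(Seq("id" -> IntDt, "tags" -> ArrayDt(StringDt)))
    println(toOrcString(schema)) // struct<`id`:int,`tags`:array<string>>
  }
}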
package s99.p12

// P12 (**) Decode a run-length encoded list.
//     Given a run-length code list generated as specified in problem P10,
//     construct its uncompressed version.
//
//     Example:
//     scala> decode(List((4, 'a), (1, 'b), (2, 'c), (2, 'a), (1, 'd), (4, 'e)))
//     res0: List[Symbol] = List('a, 'a, 'a, 'a, 'b, 'c, 'c, 'a, 'a, 'd, 'e, 'e, 'e, 'e)

object P12Answer {

  def decode[A](ls: List[(Int, A)]): List[A] =
    ls flatMap { e => List.fill(e._1)(e._2) } // List.make was replaced with List.fill so this compiles
}
izmailoff/scala-s-99
src/main/scala/s99/p12/P12Answer.scala
Scala
apache-2.0
534
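A quick usage check for the decode above; plain strings are used instead of the deprecated Symbol literals from the problem statement, and the demo object name is invented for illustration.

// Exercises s99.p12.P12Answer.decode on the example from the problem statement.
object P12Demo extends App {
  import s99.p12.P12Answer.decode

  val decoded = decode(List((4, "a"), (1, "b"), (2, "c"), (2, "a"), (1, "d"), (4, "e")))
  println(decoded) // List(a, a, a, a, b, c, c, a, a, d, e, e, e, e)
  assert(decoded == List("a", "a", "a", "a", "b", "c", "c", "a", "a", "d", "e", "e", "e", "e"))
}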
package com.cloudwick.generator.osge import ch.qos.logback.classic.{Level, Logger} import com.cloudwick.generator.utils.LazyLogging import org.slf4j.LoggerFactory /** * Log events generator driver * @author ashrith */ object Driver extends App with LazyLogging { private val root = LoggerFactory.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME).asInstanceOf[Logger] /* * Command line option parser */ val optionsParser = new scopt.OptionParser[OptionsConfig]("generator") { head("OSGE Generator") opt[Int]('e', "eventsPerSec") action { (x, c) => c.copy(eventsPerSec = x) } text "number of events to generate per sec, use this to throttle the generator" opt[String]('o', "outputFormat") action { (x, c) => c.copy(outputFormat = x) } validate { x: String => if (x == "tsv" || x == "csv" || x == "avro" || x == "seq") success else failure("supported file format's: csv, tsv, avro, seq'") } text "format of the string to write to the file defaults to: 'tsv'\\n" + "\\t where,\\n" + "\\t\\ttsv - string formatted by tabs in between columns\\n" + "\\t\\tcsv - string formatted by commas in between columns\\n" + "\\t\\tavro - string formatted using avro serialization\\n" //"\\t\\tseq - string formatted using sequence serialization" opt[Int]('r', "fileRollSize") action { (x, c) => c.copy(fileRollSize = x) } text "size of the file to roll in bytes, defaults to: Int.MaxValue (~2GB)" opt[String]('p', "filePath") action { (x, c) => c.copy(filePath = x) } text "path of the file where the data should be generated, defaults to: '/tmp'" opt[Long]('t', "totalEvents") action { (x, c) => c.copy(totalEvents = x) } text "total number of events to generate, default: 1000" opt[Unit]('m', "multiTable") action { (_, c) => c.copy(multiTable = true) } text "generate data as multi tables format, default: false" opt[Int]('b', "flushBatch") action { (x, c) => c.copy(flushBatch = x) } text "number of events to flush to file at a single time, defaults to: 10000" opt[Int]("threadsCount") action { (x, c) => c.copy(threadsCount = x) } text "number of threads to use for write and read operations, defaults to: 1" opt[Int]("threadPoolSize") action { (x, c) => c.copy(threadPoolSize = x) } text "size of the thread pool, defaults to: 10" opt[String]("loggingLevel") action { (x, c) => c.copy(logLevel = x) } text "Logging level to set, defaults to: INFO" help("help") text "prints this usage text" } optionsParser.parse(args, OptionsConfig()) map { config => // Set the logging level val logLevel = config.logLevel match { case "INFO" |"info" => Level.INFO case "TRACE"|"trace" => Level.TRACE case "DEBUG"|"debug" => Level.DEBUG case "WARN" |"warn" => Level.WARN case "ERROR"|"error" => Level.ERROR } root.setLevel(logLevel) logger.info(s"Successfully parsed command line args : $config") config .getClass .getDeclaredFields .map(_.getName) .zip(config.productIterator.to) .toMap .foreach { configElements => logger.info("Configuration element '{}' = '{}'", configElements._1, configElements._2) } try { logger.info("Initializing generator ...") new ConcurrentWriter(config.totalEvents, config).run() } catch { case e: Exception => logger.error("Error : {}", e.fillInStackTrace()) } } getOrElse { logger.error("Failed to parse command line arguments") } }
Arpit1286/generator
src/main/scala/com/cloudwick/generator/osge/Driver.scala
Scala
apache-2.0
3,599
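The Driver above builds its command line with a scopt OptionParser and folds each option into an immutable config via copy. Below is a minimal sketch of that same pattern, assuming the scopt 3.x dependency the Driver already uses; the two-field config and the option names are invented for illustration.

// Sketch of the scopt parsing pattern used by Driver above, with a
// hypothetical two-field configuration.
case class DemoConfig(eventsPerSec: Int = 0, filePath: String = "/tmp")

object DemoParser extends App {
  val parser = new scopt.OptionParser[DemoConfig]("demo-generator") {
    head("Demo Generator")
    opt[Int]('e', "eventsPerSec") action { (x, c) =>
      c.copy(eventsPerSec = x)
    } text "number of events to generate per second"
    opt[String]('p', "filePath") action { (x, c) =>
      c.copy(filePath = x)
    } text "path where the data should be generated"
    help("help") text "prints this usage text"
  }

  parser.parse(args, DemoConfig()) map { config =>
    println(s"parsed: $config")
  } getOrElse {
    sys.error("failed to parse command line arguments")
  }
}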
package scorex.transaction import com.google.common.primitives.{Bytes, Ints, Longs} import play.api.libs.json.{JsObject, Json} import scorex.account.Account import scorex.crypto.encode.Base58 import scorex.crypto.hash.FastCryptographicHash._ import scorex.serialization.Deser import scorex.transaction.LagonakiTransaction.TransactionType import scala.util.{Failure, Try} case class GenesisTransaction(override val recipient: Account, override val amount: Long, override val timestamp: Long) extends LagonakiTransaction(TransactionType.GenesisTransaction, recipient, amount, 0, timestamp, GenesisTransaction.generateSignature(recipient, amount, timestamp)) { import scorex.transaction.GenesisTransaction._ import scorex.transaction.LagonakiTransaction._ override lazy val creator: Option[Account] = None override lazy val json: JsObject = jsonBase() ++ Json.obj("recipient" -> recipient.address, "amount" -> amount) override lazy val bytes: Array[Byte] = { val typeBytes = Array(TransactionType.GenesisTransaction.id.toByte) val timestampBytes = Bytes.ensureCapacity(Longs.toByteArray(timestamp), TimestampLength, 0) val amountBytes = Bytes.ensureCapacity(Longs.toByteArray(amount), AmountLength, 0) val rcpBytes = Base58.decode(recipient.address).get require(rcpBytes.length == Account.AddressLength) val res = Bytes.concat(typeBytes, timestampBytes, rcpBytes, amountBytes) require(res.length == dataLength) res } override lazy val dataLength = TypeLength + BASE_LENGTH override lazy val signatureValid: Boolean = { val typeBytes = Bytes.ensureCapacity(Ints.toByteArray(TransactionType.GenesisTransaction.id), TypeLength, 0) val timestampBytes = Bytes.ensureCapacity(Longs.toByteArray(timestamp), TimestampLength, 0) val amountBytes = Bytes.ensureCapacity(Longs.toByteArray(amount), AmountLength, 0) val data = Bytes.concat(typeBytes, timestampBytes, Base58.decode(recipient.address).get, amountBytes) val h = hash(data) Bytes.concat(h, h).sameElements(signature) } override def validate: ValidationResult.Value = if (amount < 0) { ValidationResult.NegativeAmount } else if (!Account.isValidAddress(recipient.address)) { ValidationResult.InvalidAddress } else ValidationResult.ValidateOke override def involvedAmount(account: Account): Long = if (recipient.address.equals(account.address)) amount else 0 override def balanceChanges(): Seq[(Account, Long)] = Seq((recipient, amount)) } object GenesisTransaction extends Deser[GenesisTransaction] { import scorex.transaction.LagonakiTransaction._ private val RECIPIENT_LENGTH = Account.AddressLength private val BASE_LENGTH = TimestampLength + RECIPIENT_LENGTH + AmountLength def generateSignature(recipient: Account, amount: Long, timestamp: Long): Array[Byte] = { val typeBytes = Bytes.ensureCapacity(Ints.toByteArray(TransactionType.GenesisTransaction.id), TypeLength, 0) val timestampBytes = Bytes.ensureCapacity(Longs.toByteArray(timestamp), TimestampLength, 0) val amountBytes = Longs.toByteArray(amount) val amountFill = new Array[Byte](AmountLength - amountBytes.length) val data = Bytes.concat(typeBytes, timestampBytes, Base58.decode(recipient.address).get, Bytes.concat(amountFill, amountBytes)) val h = hash(data) Bytes.concat(h, h) } def parseBytes(data: Array[Byte]): Try[GenesisTransaction] = { data.head match { case transactionType: Byte if transactionType == TransactionType.GenesisTransaction.id => parseTail(data.tail) case transactionType => Failure(new Exception(s"Incorrect transaction type '$transactionType' in GenesisTransaction data")) } } def parseTail(data: Array[Byte]): 
Try[GenesisTransaction] = Try {
    require(data.length >= BASE_LENGTH, "Data does not match base length")

    var position = 0

    val timestampBytes = java.util.Arrays.copyOfRange(data, position, position + TimestampLength)
    val timestamp = Longs.fromByteArray(timestampBytes)
    position += TimestampLength

    val recipientBytes = java.util.Arrays.copyOfRange(data, position, position + RECIPIENT_LENGTH)
    val recipient = new Account(Base58.encode(recipientBytes))
    position += RECIPIENT_LENGTH

    val amountBytes = java.util.Arrays.copyOfRange(data, position, position + AmountLength)
    val amount = Longs.fromByteArray(amountBytes)

    GenesisTransaction(recipient, amount, timestamp)
  }
}
ScorexProject/Scorex-Lagonaki
scorex-transaction/src/main/scala/scorex/transaction/GenesisTransaction.scala
Scala
cc0-1.0
4,551
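GenesisTransaction above packs fixed-width fields with Guava's Bytes/Longs helpers and reads them back with Arrays.copyOfRange. The round-trip sketch below isolates that layout idea for just a timestamp and an amount; the 8-byte field widths and the object name are assumptions made for the example, not the transaction's full wire format.

// Round-trip sketch of the fixed-width byte layout used above: two 8-byte
// fields packed with Guava and read back by offset.
import com.google.common.primitives.{Bytes, Longs}

object LayoutSketch extends App {
  val TimestampLength = 8 // assumed widths for the demo
  val AmountLength = 8

  def pack(timestamp: Long, amount: Long): Array[Byte] =
    Bytes.concat(
      Bytes.ensureCapacity(Longs.toByteArray(timestamp), TimestampLength, 0),
      Bytes.ensureCapacity(Longs.toByteArray(amount), AmountLength, 0))

  def unpack(data: Array[Byte]): (Long, Long) = {
    val ts = Longs.fromByteArray(java.util.Arrays.copyOfRange(data, 0, TimestampLength))
    val amt = Longs.fromByteArray(
      java.util.Arrays.copyOfRange(data, TimestampLength, TimestampLength + AmountLength))
    (ts, amt)
  }

  val bytes = pack(1234567890L, 42L)
  assert(unpack(bytes) == (1234567890L, 42L))
  println(unpack(bytes)) // (1234567890,42)
}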
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.memory import javax.annotation.concurrent.GuardedBy import scala.collection.mutable import org.apache.spark.internal.Logging /** * Implements policies and bookkeeping for sharing an adjustable-sized pool of memory between tasks. * * Tries to ensure that each task gets a reasonable share of memory, instead of some task ramping up * to a large amount first and then causing others to spill to disk repeatedly. * * If there are N tasks, it ensures that each task can acquire at least 1 / 2N of the memory * before it has to spill, and at most 1 / N. Because N varies dynamically, we keep track of the * set of active tasks and redo the calculations of 1 / 2N and 1 / N in waiting tasks whenever this * set changes. This is all done by synchronizing access to mutable state and using wait() and * notifyAll() to signal changes to callers. Prior to Spark 1.6, this arbitration of memory across * tasks was performed by the ShuffleMemoryManager. * * @param lock a [[MemoryManager]] instance to synchronize on * @param memoryMode the type of memory tracked by this pool (on- or off-heap) */ private[memory] class ExecutionMemoryPool( lock: Object, memoryMode: MemoryMode ) extends MemoryPool(lock) with Logging { private[this] val poolName: String = memoryMode match { case MemoryMode.ON_HEAP => "on-heap execution" case MemoryMode.OFF_HEAP => "off-heap execution" } /** * Map from taskAttemptId -> memory consumption in bytes */ @GuardedBy("lock") private val memoryForTask = new mutable.HashMap[Long, Long]() override def memoryUsed: Long = lock.synchronized { return memoryForTask.values.sum } /** * Returns the memory consumption, in bytes, for the given task. */ def getMemoryUsageForTask(taskAttemptId: Long): Long = lock.synchronized { return memoryForTask.getOrElse(taskAttemptId, 0L) } /** * Try to acquire up to `numBytes` of memory for the given task and return the number of bytes * obtained, or 0 if none can be allocated. * * This call may block until there is enough free memory in some situations, to make sure each * task has a chance to ramp up to at least 1 / 2N of the total memory pool (where N is the # of * active tasks) before it is forced to spill. This can happen if the number of tasks increase * but an older task had a lot of memory already. * * @param numBytes number of bytes to acquire * @param taskAttemptId the task attempt acquiring memory * @param maybeGrowPool a callback that potentially grows the size of this pool. It takes in * one parameter (Long) that represents the desired amount of memory by * which this pool should be expanded. * @param computeMaxPoolSize a callback that returns the maximum allowable size of this pool * at this given moment. 
This is not a field because the max pool * size is variable in certain cases. For instance, in unified * memory management, the execution pool can be expanded by evicting * cached blocks, thereby shrinking the storage pool. * * @return the number of bytes granted to the task. */ private[memory] def acquireMemory( numBytes: Long, taskAttemptId: Long, maybeGrowPool: Long => Unit = (additionalSpaceNeeded: Long) => Unit, computeMaxPoolSize: () => Long = () => poolSize): Long = lock.synchronized { assert(numBytes > 0, s"invalid number of bytes requested: $numBytes") // TODO: clean up this clunky method signature // Add this task to the taskMemory map just so we can keep an accurate count of the number // of active tasks, to let other tasks ramp down their memory in calls to `acquireMemory` var curMem = memoryForTask.get(taskAttemptId) match { case Some(m) => m case _ => memoryForTask(taskAttemptId) = 0L // This will later cause waiting tasks to wake up and check numTasks again lock.notifyAll() 0L } // Keep looping until we're either sure that we don't want to grant this request (because this // task would have more than 1 / numActiveTasks of the memory) or we have enough free // memory to give it (we always let each task get at least 1 / (2 * numActiveTasks)). // TODO: simplify this to limit each task to its own slot while (true) { val numActiveTasks = memoryForTask.keys.size // In every iteration of this loop, we should first try to reclaim any borrowed execution // space from storage. This is necessary because of the potential race condition where new // storage blocks may steal the free execution memory that this task was waiting for. maybeGrowPool(numBytes - memoryFree) // Maximum size the pool would have after potentially growing the pool. // This is used to compute the upper bound of how much memory each task can occupy. This // must take into account potential free memory as well as the amount this pool currently // occupies. Otherwise, we may run into SPARK-12155 where, in unified memory management, // we did not take into account space that could have been freed by evicting cached blocks. val maxPoolSize = computeMaxPoolSize() val maxMemoryPerTask = maxPoolSize / numActiveTasks val minMemoryPerTask = poolSize / (2 * numActiveTasks) // How much we can grant this task; keep its share within 0 <= X <= 1 / numActiveTasks val maxToGrant = math.min(numBytes, math.max(0, maxMemoryPerTask - curMem)) // Only give it as much memory as is free, which might be none if it reached 1 / numTasks val toGrant = math.min(maxToGrant, memoryFree) // We want to let each task get at least 1 / (2 * numActiveTasks) before blocking; // if we can't give it this much now, wait for other tasks to free up memory // (this happens if older tasks allocated lots of memory before N grew) if (toGrant < numBytes && curMem + toGrant < minMemoryPerTask) { logInfo(s"TID $taskAttemptId waiting for at least 1/2N of $poolName pool to be free") lock.wait() curMem = memoryForTask(taskAttemptId) } else { memoryForTask(taskAttemptId) += toGrant return toGrant } } return 0L // Never reached } /** * Release `numBytes` of memory acquired by the given task. 
*/ def releaseMemory(numBytes: Long, taskAttemptId: Long): Unit = lock.synchronized { val curMemOpt = memoryForTask.get(taskAttemptId) var curMem = curMemOpt match { case Some(m) => m case _ => 0L } var memoryToFree = if (curMem < numBytes) { val mem = curMem logWarning( s"Internal error: release called on $numBytes bytes but task only has $mem bytes " + s"of memory from the $poolName pool") curMem } else { numBytes } if (curMemOpt.isDefined) { curMem -= memoryToFree if (curMem > 0) { memoryForTask(taskAttemptId) = curMem } else { memoryForTask.remove(taskAttemptId) } } lock.notifyAll() // Notify waiters in acquireMemory() that memory has been freed } /** * Release all memory for the given task and mark it as inactive (e.g. when a task ends). * @return the number of bytes freed. */ def releaseAllMemoryForTask(taskAttemptId: Long): Long = lock.synchronized { val numBytesToFree = getMemoryUsageForTask(taskAttemptId) releaseMemory(numBytesToFree, taskAttemptId) return numBytesToFree } }
big-pegasus/spark
core/src/main/scala/org/apache/spark/memory/ExecutionMemoryPool.scala
Scala
apache-2.0
8,448
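The acquireMemory loop above keeps each task's share between 1/(2N) and 1/N of the pool. The pure sketch below reproduces only that grant arithmetic so the bounds are easy to check by hand; it deliberately omits the locking and waiting, and all names and numbers are illustrative.

// Pure sketch of the per-task grant arithmetic in acquireMemory above.
object FairShareSketch extends App {
  def grant(numBytes: Long, curMem: Long, poolSize: Long, maxPoolSize: Long,
            memoryFree: Long, numActiveTasks: Int): Long = {
    val maxMemoryPerTask = maxPoolSize / numActiveTasks       // upper bound: 1/N of the pool
    val minMemoryPerTask = poolSize / (2 * numActiveTasks)    // lower bound: 1/(2N) of the pool
    val maxToGrant = math.min(numBytes, math.max(0, maxMemoryPerTask - curMem))
    val toGrant = math.min(maxToGrant, memoryFree)
    // The real pool blocks on lock.wait() instead of granting when
    // toGrant < numBytes and curMem + toGrant < minMemoryPerTask.
    toGrant
  }

  // Two active tasks sharing a 1000-byte pool: a task holding nothing
  // can be granted at most 500 bytes (1/N of the pool).
  println(grant(numBytes = 800L, curMem = 0L, poolSize = 1000L, maxPoolSize = 1000L,
    memoryFree = 1000L, numActiveTasks = 2)) // 500
}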
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.deploy.yarn import java.io.{FileSystem => _, _} import java.net.{InetAddress, UnknownHostException, URI, URL} import java.nio.ByteBuffer import java.nio.charset.StandardCharsets import java.nio.file.{Files, Paths} import java.util.{Locale, Properties, UUID} import java.util.zip.{ZipEntry, ZipOutputStream} import scala.collection.JavaConverters._ import scala.collection.immutable.{Map => IMap} import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, ListBuffer, Map} import scala.util.control.NonFatal import com.google.common.base.Objects import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs._ import org.apache.hadoop.fs.permission.FsPermission import org.apache.hadoop.io.Text import org.apache.hadoop.mapreduce.MRJobConfig import org.apache.hadoop.security.UserGroupInformation import org.apache.hadoop.util.StringUtils import org.apache.hadoop.yarn.api._ import org.apache.hadoop.yarn.api.ApplicationConstants.Environment import org.apache.hadoop.yarn.api.protocolrecords._ import org.apache.hadoop.yarn.api.records._ import org.apache.hadoop.yarn.client.api.{YarnClient, YarnClientApplication} import org.apache.hadoop.yarn.conf.YarnConfiguration import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException import org.apache.hadoop.yarn.security.AMRMTokenIdentifier import org.apache.hadoop.yarn.util.Records import org.apache.spark.{SecurityManager, SparkConf, SparkException} import org.apache.spark.api.python.PythonUtils import org.apache.spark.deploy.{SparkApplication, SparkHadoopUtil} import org.apache.spark.deploy.security.HadoopDelegationTokenManager import org.apache.spark.deploy.yarn.ResourceRequestHelper._ import org.apache.spark.deploy.yarn.config._ import org.apache.spark.internal.Logging import org.apache.spark.internal.config._ import org.apache.spark.internal.config.Python._ import org.apache.spark.launcher.{LauncherBackend, SparkAppHandle, YarnCommandBuilderUtils} import org.apache.spark.resource.ResourceProfile import org.apache.spark.rpc.RpcEnv import org.apache.spark.util.{CallerContext, Utils, YarnContainerInfoHelper} private[spark] class Client( val args: ClientArguments, val sparkConf: SparkConf, val rpcEnv: RpcEnv) extends Logging { import Client._ import YarnSparkHadoopUtil._ private val yarnClient = YarnClient.createYarnClient private val hadoopConf = new YarnConfiguration(SparkHadoopUtil.newConfiguration(sparkConf)) private val isClusterMode = sparkConf.get(SUBMIT_DEPLOY_MODE) == "cluster" private val isClientUnmanagedAMEnabled = sparkConf.get(YARN_UNMANAGED_AM) && !isClusterMode private var appMaster: ApplicationMaster = _ private var stagingDirPath: Path = _ // AM related configurations private val amMemory = if (isClusterMode) { 
sparkConf.get(DRIVER_MEMORY).toInt } else { sparkConf.get(AM_MEMORY).toInt } private val amMemoryOverhead = { val amMemoryOverheadEntry = if (isClusterMode) DRIVER_MEMORY_OVERHEAD else AM_MEMORY_OVERHEAD sparkConf.get(amMemoryOverheadEntry).getOrElse( math.max((MEMORY_OVERHEAD_FACTOR * amMemory).toLong, ResourceProfile.MEMORY_OVERHEAD_MIN_MIB)).toInt } private val amCores = if (isClusterMode) { sparkConf.get(DRIVER_CORES) } else { sparkConf.get(AM_CORES) } // Executor related configurations private val executorMemory = sparkConf.get(EXECUTOR_MEMORY) // Executor offHeap memory in MiB. protected val executorOffHeapMemory = Utils.executorOffHeapMemorySizeAsMb(sparkConf) private val executorMemoryOverhead = sparkConf.get(EXECUTOR_MEMORY_OVERHEAD).getOrElse( math.max((MEMORY_OVERHEAD_FACTOR * executorMemory).toLong, ResourceProfile.MEMORY_OVERHEAD_MIN_MIB)).toInt private val isPython = sparkConf.get(IS_PYTHON_APP) private val pysparkWorkerMemory: Int = if (isPython) { sparkConf.get(PYSPARK_EXECUTOR_MEMORY).map(_.toInt).getOrElse(0) } else { 0 } private val distCacheMgr = new ClientDistributedCacheManager() private val cachedResourcesConf = new SparkConf(false) private val keytab = sparkConf.get(KEYTAB).orNull private val amKeytabFileName: Option[String] = if (keytab != null && isClusterMode) { val principal = sparkConf.get(PRINCIPAL).orNull require((principal == null) == (keytab == null), "Both principal and keytab must be defined, or neither.") logInfo(s"Kerberos credentials: principal = $principal, keytab = $keytab") // Generate a file name that can be used for the keytab file, that does not conflict // with any user file. Some(new File(keytab).getName() + "-" + UUID.randomUUID().toString) } else { None } require(keytab == null || !Utils.isLocalUri(keytab), "Keytab should reference a local file.") private val launcherBackend = new LauncherBackend() { override protected def conf: SparkConf = sparkConf override def onStopRequest(): Unit = { if (isClusterMode && appId != null) { yarnClient.killApplication(appId) } else { setState(SparkAppHandle.State.KILLED) stop() } } } private val fireAndForget = isClusterMode && !sparkConf.get(WAIT_FOR_APP_COMPLETION) private var appId: ApplicationId = null def reportLauncherState(state: SparkAppHandle.State): Unit = { launcherBackend.setState(state) } def stop(): Unit = { if (appMaster != null) { appMaster.stopUnmanaged(stagingDirPath) } launcherBackend.close() yarnClient.stop() } /** * Submit an application running our ApplicationMaster to the ResourceManager. * * The stable Yarn API provides a convenience method (YarnClient#createApplication) for * creating applications and setting up the application submission context. This was not * available in the alpha API. */ def submitApplication(): ApplicationId = { ResourceRequestHelper.validateResources(sparkConf) var appId: ApplicationId = null try { launcherBackend.connect() yarnClient.init(hadoopConf) yarnClient.start() logInfo("Requesting a new application from cluster with %d NodeManagers" .format(yarnClient.getYarnClusterMetrics.getNumNodeManagers)) // Get a new application from our RM val newApp = yarnClient.createApplication() val newAppResponse = newApp.getNewApplicationResponse() appId = newAppResponse.getApplicationId() // The app staging dir based on the STAGING_DIR configuration if configured // otherwise based on the users home directory. 
// scalastyle:off FileSystemGet val appStagingBaseDir = sparkConf.get(STAGING_DIR) .map { new Path(_, UserGroupInformation.getCurrentUser.getShortUserName) } .getOrElse(FileSystem.get(hadoopConf).getHomeDirectory()) stagingDirPath = new Path(appStagingBaseDir, getAppStagingDir(appId)) // scalastyle:on FileSystemGet new CallerContext("CLIENT", sparkConf.get(APP_CALLER_CONTEXT), Option(appId.toString)).setCurrentContext() // Verify whether the cluster has enough resources for our AM verifyClusterResources(newAppResponse) // Set up the appropriate contexts to launch our AM val containerContext = createContainerLaunchContext(newAppResponse) val appContext = createApplicationSubmissionContext(newApp, containerContext) // Finally, submit and monitor the application logInfo(s"Submitting application $appId to ResourceManager") yarnClient.submitApplication(appContext) launcherBackend.setAppId(appId.toString) reportLauncherState(SparkAppHandle.State.SUBMITTED) appId } catch { case e: Throwable => if (stagingDirPath != null) { cleanupStagingDir() } throw e } } /** * Cleanup application staging directory. */ private def cleanupStagingDir(): Unit = { if (sparkConf.get(PRESERVE_STAGING_FILES)) { return } def cleanupStagingDirInternal(): Unit = { try { val fs = stagingDirPath.getFileSystem(hadoopConf) if (fs.delete(stagingDirPath, true)) { logInfo(s"Deleted staging directory $stagingDirPath") } } catch { case ioe: IOException => logWarning("Failed to cleanup staging dir " + stagingDirPath, ioe) } } cleanupStagingDirInternal() } /** * Set up the context for submitting our ApplicationMaster. * This uses the YarnClientApplication not available in the Yarn alpha API. */ def createApplicationSubmissionContext( newApp: YarnClientApplication, containerContext: ContainerLaunchContext): ApplicationSubmissionContext = { val componentName = if (isClusterMode) { config.YARN_DRIVER_RESOURCE_TYPES_PREFIX } else { config.YARN_AM_RESOURCE_TYPES_PREFIX } val yarnAMResources = getYarnResourcesAndAmounts(sparkConf, componentName) val amResources = yarnAMResources ++ getYarnResourcesFromSparkResources(SPARK_DRIVER_PREFIX, sparkConf) logDebug(s"AM resources: $amResources") val appContext = newApp.getApplicationSubmissionContext appContext.setApplicationName(sparkConf.get("spark.app.name", "Spark")) appContext.setQueue(sparkConf.get(QUEUE_NAME)) appContext.setAMContainerSpec(containerContext) appContext.setApplicationType(sparkConf.get(APPLICATION_TYPE)) sparkConf.get(APPLICATION_TAGS).foreach { tags => appContext.setApplicationTags(new java.util.HashSet[String](tags.asJava)) } sparkConf.get(MAX_APP_ATTEMPTS) match { case Some(v) => appContext.setMaxAppAttempts(v) case None => logDebug(s"${MAX_APP_ATTEMPTS.key} is not set. 
" + "Cluster's default value will be used.") } sparkConf.get(AM_ATTEMPT_FAILURE_VALIDITY_INTERVAL_MS).foreach { interval => appContext.setAttemptFailuresValidityInterval(interval) } val capability = Records.newRecord(classOf[Resource]) capability.setMemory(amMemory + amMemoryOverhead) capability.setVirtualCores(amCores) if (amResources.nonEmpty) { ResourceRequestHelper.setResourceRequests(amResources, capability) } logDebug(s"Created resource capability for AM request: $capability") sparkConf.get(AM_NODE_LABEL_EXPRESSION) match { case Some(expr) => val amRequest = Records.newRecord(classOf[ResourceRequest]) amRequest.setResourceName(ResourceRequest.ANY) amRequest.setPriority(Priority.newInstance(0)) amRequest.setCapability(capability) amRequest.setNumContainers(1) amRequest.setNodeLabelExpression(expr) appContext.setAMContainerResourceRequest(amRequest) case None => appContext.setResource(capability) } sparkConf.get(ROLLED_LOG_INCLUDE_PATTERN).foreach { includePattern => try { val logAggregationContext = Records.newRecord(classOf[LogAggregationContext]) logAggregationContext.setRolledLogsIncludePattern(includePattern) sparkConf.get(ROLLED_LOG_EXCLUDE_PATTERN).foreach { excludePattern => logAggregationContext.setRolledLogsExcludePattern(excludePattern) } appContext.setLogAggregationContext(logAggregationContext) } catch { case NonFatal(e) => logWarning(s"Ignoring ${ROLLED_LOG_INCLUDE_PATTERN.key} because the version of YARN " + "does not support it", e) } } appContext.setUnmanagedAM(isClientUnmanagedAMEnabled) sparkConf.get(APPLICATION_PRIORITY).foreach { appPriority => appContext.setPriority(Priority.newInstance(appPriority)) } appContext } /** * Set up security tokens for launching our ApplicationMaster container. * * In client mode, a set of credentials has been obtained by the scheduler, so they are copied * and sent to the AM. In cluster mode, new credentials are obtained and then sent to the AM, * along with whatever credentials the current user already has. */ private def setupSecurityToken(amContainer: ContainerLaunchContext): Unit = { val currentUser = UserGroupInformation.getCurrentUser() val credentials = currentUser.getCredentials() if (isClusterMode) { val credentialManager = new HadoopDelegationTokenManager(sparkConf, hadoopConf, null) credentialManager.obtainDelegationTokens(credentials) } val serializedCreds = SparkHadoopUtil.get.serialize(credentials) amContainer.setTokens(ByteBuffer.wrap(serializedCreds)) } /** Get the application report from the ResourceManager for an application we have submitted. */ def getApplicationReport(appId: ApplicationId): ApplicationReport = yarnClient.getApplicationReport(appId) /** * Return the security token used by this client to communicate with the ApplicationMaster. * If no security is enabled, the token returned by the report is null. */ private def getClientToken(report: ApplicationReport): String = Option(report.getClientToAMToken).map(_.toString).getOrElse("") /** * Fail fast if we have requested more resources per container than is available in the cluster. 
*/ private def verifyClusterResources(newAppResponse: GetNewApplicationResponse): Unit = { val maxMem = newAppResponse.getMaximumResourceCapability().getMemory() logInfo("Verifying our application has not requested more than the maximum " + s"memory capability of the cluster ($maxMem MB per container)") val executorMem = executorMemory + executorOffHeapMemory + executorMemoryOverhead + pysparkWorkerMemory if (executorMem > maxMem) { throw new IllegalArgumentException(s"Required executor memory ($executorMemory MB), " + s"offHeap memory ($executorOffHeapMemory) MB, overhead ($executorMemoryOverhead MB), " + s"and PySpark memory ($pysparkWorkerMemory MB) is above the max threshold ($maxMem MB) " + "of this cluster! Please check the values of 'yarn.scheduler.maximum-allocation-mb' " + "and/or 'yarn.nodemanager.resource.memory-mb'.") } val amMem = amMemory + amMemoryOverhead if (amMem > maxMem) { throw new IllegalArgumentException(s"Required AM memory ($amMemory" + s"+$amMemoryOverhead MB) is above the max threshold ($maxMem MB) of this cluster! " + "Please check the values of 'yarn.scheduler.maximum-allocation-mb' and/or " + "'yarn.nodemanager.resource.memory-mb'.") } logInfo("Will allocate AM container, with %d MB memory including %d MB overhead".format( amMem, amMemoryOverhead)) // We could add checks to make sure the entire cluster has enough resources but that involves // getting all the node reports and computing ourselves. } /** * Copy the given file to a remote file system (e.g. HDFS) if needed. * The file is only copied if the source and destination file systems are different or the source * scheme is "file". This is used for preparing resources for launching the ApplicationMaster * container. Exposed for testing. */ private[yarn] def copyFileToRemote( destDir: Path, srcPath: Path, replication: Short, symlinkCache: Map[URI, Path], force: Boolean = false, destName: Option[String] = None): Path = { val destFs = destDir.getFileSystem(hadoopConf) val srcFs = srcPath.getFileSystem(hadoopConf) var destPath = srcPath if (force || !compareFs(srcFs, destFs) || "file".equals(srcFs.getScheme)) { destPath = new Path(destDir, destName.getOrElse(srcPath.getName())) logInfo(s"Uploading resource $srcPath -> $destPath") try { FileUtil.copy(srcFs, srcPath, destFs, destPath, false, hadoopConf) } catch { // HADOOP-16878 changes the behavior to throw exceptions when src equals to dest case e: PathOperationException if srcFs.makeQualified(srcPath).equals(destFs.makeQualified(destPath)) => } destFs.setReplication(destPath, replication) destFs.setPermission(destPath, new FsPermission(APP_FILE_PERMISSION)) } else { logInfo(s"Source and destination file systems are the same. Not copying $srcPath") } // Resolve any symlinks in the URI path so using a "current" symlink to point to a specific // version shows the specific version in the distributed cache configuration val qualifiedDestPath = destFs.makeQualified(destPath) val qualifiedDestDir = qualifiedDestPath.getParent val resolvedDestDir = symlinkCache.getOrElseUpdate(qualifiedDestDir.toUri(), { val fc = FileContext.getFileContext(qualifiedDestDir.toUri(), hadoopConf) fc.resolvePath(qualifiedDestDir) }) new Path(resolvedDestDir, qualifiedDestPath.getName()) } /** * Upload any resources to the distributed cache if needed. If a resource is intended to be * consumed locally, set up the appropriate config for downstream code to handle it properly. * This is used for setting up a container launch context for our ApplicationMaster. * Exposed for testing. 
*/ def prepareLocalResources( destDir: Path, pySparkArchives: Seq[String]): HashMap[String, LocalResource] = { logInfo("Preparing resources for our AM container") // Upload Spark and the application JAR to the remote file system if necessary, // and add them as local resources to the application master. val fs = destDir.getFileSystem(hadoopConf) // Used to keep track of URIs added to the distributed cache. If the same URI is added // multiple times, YARN will fail to launch containers for the app with an internal // error. val distributedUris = new HashSet[String] // Used to keep track of URIs(files) added to the distribute cache have the same name. If // same name but different path files are added multiple time, YARN will fail to launch // containers for the app with an internal error. val distributedNames = new HashSet[String] val replication = sparkConf.get(STAGING_FILE_REPLICATION).map(_.toShort) .getOrElse(fs.getDefaultReplication(destDir)) val localResources = HashMap[String, LocalResource]() FileSystem.mkdirs(fs, destDir, new FsPermission(STAGING_DIR_PERMISSION)) val statCache: Map[URI, FileStatus] = HashMap[URI, FileStatus]() val symlinkCache: Map[URI, Path] = HashMap[URI, Path]() def addDistributedUri(uri: URI): Boolean = { val uriStr = uri.toString() val fileName = new File(uri.getPath).getName if (distributedUris.contains(uriStr)) { logWarning(s"Same path resource $uri added multiple times to distributed cache.") false } else if (distributedNames.contains(fileName)) { logWarning(s"Same name resource $uri added multiple times to distributed cache") false } else { distributedUris += uriStr distributedNames += fileName true } } /* * Distribute a file to the cluster. * * If the file's path is a "local:" URI, it's actually not distributed. Other files are copied * to HDFS (if not already there) and added to the application's distributed cache. * * @param path URI of the file to distribute. * @param resType Type of resource being distributed. * @param destName Name of the file in the distributed cache. * @param targetDir Subdirectory where to place the file. * @param appMasterOnly Whether to distribute only to the AM. * @return A 2-tuple. First item is whether the file is a "local:" URI. Second item is the * localized path for non-local paths, or the input `path` for local paths. * The localized path will be null if the URI has already been added to the cache. */ def distribute( path: String, resType: LocalResourceType = LocalResourceType.FILE, destName: Option[String] = None, targetDir: Option[String] = None, appMasterOnly: Boolean = false): (Boolean, String) = { val trimmedPath = path.trim() val localURI = Utils.resolveURI(trimmedPath) if (localURI.getScheme != Utils.LOCAL_SCHEME) { if (addDistributedUri(localURI)) { val localPath = getQualifiedLocalPath(localURI, hadoopConf) val linkname = targetDir.map(_ + "/").getOrElse("") + destName.orElse(Option(localURI.getFragment())).getOrElse(localPath.getName()) val destPath = copyFileToRemote(destDir, localPath, replication, symlinkCache) val destFs = FileSystem.get(destPath.toUri(), hadoopConf) distCacheMgr.addResource( destFs, hadoopConf, destPath, localResources, resType, linkname, statCache, appMasterOnly = appMasterOnly) (false, linkname) } else { (false, null) } } else { (true, trimmedPath) } } // If we passed in a keytab, make sure we copy the keytab to the staging directory on // HDFS, and setup the relevant environment vars, so the AM can login again. 
amKeytabFileName.foreach { kt => logInfo("To enable the AM to login from keytab, credentials are being copied over to the AM" + " via the YARN Secure Distributed Cache.") val (_, localizedPath) = distribute(keytab, destName = Some(kt), appMasterOnly = true) require(localizedPath != null, "Keytab file already distributed.") } // If we passed in a ivySettings file, make sure we copy the file to the distributed cache // in cluster mode so that the driver can access it val ivySettings = sparkConf.getOption("spark.jars.ivySettings") val ivySettingsLocalizedPath: Option[String] = ivySettings match { case Some(ivySettingsPath) if isClusterMode => val uri = new URI(ivySettingsPath) Option(uri.getScheme).getOrElse("file") match { case "file" => val ivySettingsFile = new File(uri.getPath) require(ivySettingsFile.exists(), s"Ivy settings file $ivySettingsFile not found") require(ivySettingsFile.isFile(), s"Ivy settings file $ivySettingsFile is not a" + "normal file") // Generate a file name that can be used for the ivySettings file, that does not // conflict with any user file. val localizedFileName = Some(ivySettingsFile.getName() + "-" + UUID.randomUUID().toString) val (_, localizedPath) = distribute(ivySettingsPath, destName = localizedFileName) require(localizedPath != null, "IvySettings file already distributed.") Some(localizedPath) case scheme => throw new IllegalArgumentException(s"Scheme $scheme not supported in " + "spark.jars.ivySettings") } case _ => None } /** * Add Spark to the cache. There are two settings that control what files to add to the cache: * - if a Spark archive is defined, use the archive. The archive is expected to contain * jar files at its root directory. * - if a list of jars is provided, filter the non-local ones, resolve globs, and * add the found files to the cache. * * Note that the archive cannot be a "local" URI. If none of the above settings are found, * then upload all files found in $SPARK_HOME/jars. */ val sparkArchive = sparkConf.get(SPARK_ARCHIVE) if (sparkArchive.isDefined) { val archive = sparkArchive.get require(!Utils.isLocalUri(archive), s"${SPARK_ARCHIVE.key} cannot be a local URI.") distribute(Utils.resolveURI(archive).toString, resType = LocalResourceType.ARCHIVE, destName = Some(LOCALIZED_LIB_DIR)) } else { sparkConf.get(SPARK_JARS) match { case Some(jars) => // Break the list of jars to upload, and resolve globs. val localJars = new ArrayBuffer[String]() jars.foreach { jar => if (!Utils.isLocalUri(jar)) { val path = getQualifiedLocalPath(Utils.resolveURI(jar), hadoopConf) val pathFs = FileSystem.get(path.toUri(), hadoopConf) val fss = pathFs.globStatus(path) if (fss == null) { throw new FileNotFoundException(s"Path ${path.toString} does not exist") } fss.filter(_.isFile()).foreach { entry => val uri = entry.getPath().toUri() statCache.update(uri, entry) distribute(uri.toString(), targetDir = Some(LOCALIZED_LIB_DIR)) } } else { localJars += jar } } // Propagate the local URIs to the containers using the configuration. sparkConf.set(SPARK_JARS, localJars.toSeq) case None => // No configuration, so fall back to uploading local jar files. 
logWarning(s"Neither ${SPARK_JARS.key} nor ${SPARK_ARCHIVE.key} is set, falling back " + "to uploading libraries under SPARK_HOME.") val jarsDir = new File(YarnCommandBuilderUtils.findJarsDir( sparkConf.getenv("SPARK_HOME"))) val jarsArchive = File.createTempFile(LOCALIZED_LIB_DIR, ".zip", new File(Utils.getLocalDir(sparkConf))) val jarsStream = new ZipOutputStream(new FileOutputStream(jarsArchive)) try { jarsStream.setLevel(0) jarsDir.listFiles().foreach { f => if (f.isFile && f.getName.toLowerCase(Locale.ROOT).endsWith(".jar") && f.canRead) { jarsStream.putNextEntry(new ZipEntry(f.getName)) Files.copy(f.toPath, jarsStream) jarsStream.closeEntry() } } } finally { jarsStream.close() } distribute(jarsArchive.toURI.getPath, resType = LocalResourceType.ARCHIVE, destName = Some(LOCALIZED_LIB_DIR)) jarsArchive.delete() } } /** * Copy user jar to the distributed cache if their scheme is not "local". * Otherwise, set the corresponding key in our SparkConf to handle it downstream. */ Option(args.userJar).filter(_.trim.nonEmpty).foreach { jar => val (isLocal, localizedPath) = distribute(jar, destName = Some(APP_JAR_NAME)) if (isLocal) { require(localizedPath != null, s"Path $jar already distributed") // If the resource is intended for local use only, handle this downstream // by setting the appropriate property sparkConf.set(APP_JAR, localizedPath) } } /** * Do the same for any additional resources passed in through ClientArguments. * Each resource category is represented by a 3-tuple of: * (1) comma separated list of resources in this category, * (2) resource type, and * (3) whether to add these resources to the classpath */ val cachedSecondaryJarLinks = ListBuffer.empty[String] List( (sparkConf.get(JARS_TO_DISTRIBUTE), LocalResourceType.FILE, true), (sparkConf.get(FILES_TO_DISTRIBUTE), LocalResourceType.FILE, false), (sparkConf.get(ARCHIVES_TO_DISTRIBUTE), LocalResourceType.ARCHIVE, false) ).foreach { case (flist, resType, addToClasspath) => flist.foreach { file => val (_, localizedPath) = distribute(file, resType = resType) // If addToClassPath, we ignore adding jar multiple times to distributed cache. if (addToClasspath) { if (localizedPath != null) { cachedSecondaryJarLinks += localizedPath } } else { if (localizedPath == null) { throw new IllegalArgumentException(s"Attempt to add ($file) multiple times" + " to the distributed cache.") } } } } if (cachedSecondaryJarLinks.nonEmpty) { sparkConf.set(SECONDARY_JARS, cachedSecondaryJarLinks.toSeq) } if (isClusterMode && args.primaryPyFile != null) { distribute(args.primaryPyFile, appMasterOnly = true) } pySparkArchives.foreach { f => val uri = Utils.resolveURI(f) if (uri.getScheme != Utils.LOCAL_SCHEME) { distribute(f) } } // The python files list needs to be treated especially. All files that are not an // archive need to be placed in a subdirectory that will be added to PYTHONPATH. sparkConf.get(PY_FILES).foreach { f => val targetDir = if (f.endsWith(".py")) Some(LOCALIZED_PYTHON_DIR) else None distribute(f, targetDir = targetDir) } // Update the configuration with all the distributed files, minus the conf archive. The // conf archive will be handled by the AM differently so that we avoid having to send // this configuration by other means. See SPARK-14602 for one reason of why this is needed. distCacheMgr.updateConfiguration(cachedResourcesConf) // Upload the conf archive to HDFS manually, and record its location in the configuration. 
// This will allow the AM to know where the conf archive is in HDFS, so that it can be // distributed to the containers. // // This code forces the archive to be copied, so that unit tests pass (since in that case both // file systems are the same and the archive wouldn't normally be copied). In most (all?) // deployments, the archive would be copied anyway, since it's a temp file in the local file // system. val remoteConfArchivePath = new Path(destDir, LOCALIZED_CONF_ARCHIVE) val remoteFs = FileSystem.get(remoteConfArchivePath.toUri(), hadoopConf) cachedResourcesConf.set(CACHED_CONF_ARCHIVE, remoteConfArchivePath.toString()) val confsToOverride = Map.empty[String, String] // If propagating the keytab to the AM, override the keytab name with the name of the // distributed file. amKeytabFileName.foreach { kt => confsToOverride.put(KEYTAB.key, kt) } // If propagating the ivySettings file to the distributed cache, override the ivySettings // file name with the name of the distributed file. ivySettingsLocalizedPath.foreach { path => confsToOverride.put("spark.jars.ivySettings", path) } val localConfArchive = new Path(createConfArchive(confsToOverride).toURI()) copyFileToRemote(destDir, localConfArchive, replication, symlinkCache, force = true, destName = Some(LOCALIZED_CONF_ARCHIVE)) // Manually add the config archive to the cache manager so that the AM is launched with // the proper files set up. distCacheMgr.addResource( remoteFs, hadoopConf, remoteConfArchivePath, localResources, LocalResourceType.ARCHIVE, LOCALIZED_CONF_DIR, statCache, appMasterOnly = false) localResources } /** * Create an archive with the config files for distribution. * * These will be used by AM and executors. The files are zipped and added to the job as an * archive, so that YARN will explode it when distributing to AM and executors. This directory * is then added to the classpath of AM and executor process, just to make sure that everybody * is using the same default config. * * This follows the order of precedence set by the startup scripts, in which HADOOP_CONF_DIR * shows up in the classpath before YARN_CONF_DIR. * * Currently this makes a shallow copy of the conf directory. If there are cases where a * Hadoop config directory contains subdirectories, this code will have to be fixed. * * The archive also contains some Spark configuration. Namely, it saves the contents of * SparkConf in a file to be loaded by the AM process. * * @param confsToOverride configs that should overriden when creating the final spark conf file */ private def createConfArchive(confsToOverride: Map[String, String]): File = { val hadoopConfFiles = new HashMap[String, File]() // SPARK_CONF_DIR shows up in the classpath before HADOOP_CONF_DIR/YARN_CONF_DIR sys.env.get("SPARK_CONF_DIR").foreach { localConfDir => val dir = new File(localConfDir) if (dir.isDirectory) { val files = dir.listFiles(new FileFilter { override def accept(pathname: File): Boolean = { pathname.isFile && pathname.getName.endsWith(".xml") } }) files.foreach { f => hadoopConfFiles(f.getName) = f } } } // SPARK-23630: during testing, Spark scripts filter out hadoop conf dirs so that user's // environments do not interfere with tests. This allows a special env variable during // tests so that custom conf dirs can be used by unit tests. 
val confDirs = Seq("HADOOP_CONF_DIR", "YARN_CONF_DIR") ++ (if (Utils.isTesting) Seq("SPARK_TEST_HADOOP_CONF_DIR") else Nil) confDirs.foreach { envKey => sys.env.get(envKey).foreach { path => val dir = new File(path) if (dir.isDirectory()) { val files = dir.listFiles() if (files == null) { logWarning("Failed to list files under directory " + dir) } else { files.foreach { file => if (file.isFile && !hadoopConfFiles.contains(file.getName())) { hadoopConfFiles(file.getName()) = file } } } } } } val confArchive = File.createTempFile(LOCALIZED_CONF_DIR, ".zip", new File(Utils.getLocalDir(sparkConf))) val confStream = new ZipOutputStream(new FileOutputStream(confArchive)) logDebug(s"Creating an archive with the config files for distribution at $confArchive.") try { confStream.setLevel(0) // Upload $SPARK_CONF_DIR/log4j.properties file to the distributed cache to make sure that // the executors will use the latest configurations instead of the default values. This is // required when user changes log4j.properties directly to set the log configurations. If // configuration file is provided through --files then executors will be taking configurations // from --files instead of $SPARK_CONF_DIR/log4j.properties. // Also upload metrics.properties to distributed cache if exists in classpath. // If user specify this file using --files then executors will use the one // from --files instead. for { prop <- Seq("log4j.properties", "metrics.properties") url <- Option(Utils.getContextOrSparkClassLoader.getResource(prop)) if url.getProtocol == "file" } { val file = new File(url.getPath()) confStream.putNextEntry(new ZipEntry(file.getName())) Files.copy(file.toPath, confStream) confStream.closeEntry() } // Save the Hadoop config files under a separate directory in the archive. This directory // is appended to the classpath so that the cluster-provided configuration takes precedence. confStream.putNextEntry(new ZipEntry(s"$LOCALIZED_HADOOP_CONF_DIR/")) confStream.closeEntry() hadoopConfFiles.foreach { case (name, file) => if (file.canRead()) { confStream.putNextEntry(new ZipEntry(s"$LOCALIZED_HADOOP_CONF_DIR/$name")) Files.copy(file.toPath, confStream) confStream.closeEntry() } } // Save the YARN configuration into a separate file that will be overlayed on top of the // cluster's Hadoop conf. confStream.putNextEntry(new ZipEntry(SparkHadoopUtil.SPARK_HADOOP_CONF_FILE)) hadoopConf.writeXml(confStream) confStream.closeEntry() // Save Spark configuration to a file in the archive. val props = confToProperties(sparkConf) confsToOverride.foreach { case (k, v) => props.setProperty(k, v)} writePropertiesToArchive(props, SPARK_CONF_FILE, confStream) // Write the distributed cache config to the archive. writePropertiesToArchive(confToProperties(cachedResourcesConf), DIST_CACHE_CONF_FILE, confStream) } finally { confStream.close() } confArchive } /** * Set up the environment for launching our ApplicationMaster container. */ private def setupLaunchEnv( stagingDirPath: Path, pySparkArchives: Seq[String]): HashMap[String, String] = { logInfo("Setting up the launch environment for our AM container") val env = new HashMap[String, String]() populateClasspath(args, hadoopConf, sparkConf, env, sparkConf.get(DRIVER_CLASS_PATH)) env("SPARK_YARN_STAGING_DIR") = stagingDirPath.toString env("SPARK_USER") = UserGroupInformation.getCurrentUser().getShortUserName() // Pick up any environment variables for the AM provided through spark.yarn.appMasterEnv.* val amEnvPrefix = "spark.yarn.appMasterEnv." 
sparkConf.getAll .filter { case (k, v) => k.startsWith(amEnvPrefix) } .map { case (k, v) => (k.substring(amEnvPrefix.length), v) } .foreach { case (k, v) => YarnSparkHadoopUtil.addPathToEnvironment(env, k, v) } // If pyFiles contains any .py files, we need to add LOCALIZED_PYTHON_DIR to the PYTHONPATH // of the container processes too. Add all non-.py files directly to PYTHONPATH. // // NOTE: the code currently does not handle .py files defined with a "local:" scheme. val pythonPath = new ListBuffer[String]() val (pyFiles, pyArchives) = sparkConf.get(PY_FILES).partition(_.endsWith(".py")) if (pyFiles.nonEmpty) { pythonPath += buildPath(Environment.PWD.$$(), LOCALIZED_PYTHON_DIR) } (pySparkArchives ++ pyArchives).foreach { path => val uri = Utils.resolveURI(path) if (uri.getScheme != Utils.LOCAL_SCHEME) { pythonPath += buildPath(Environment.PWD.$$(), new Path(uri).getName()) } else { pythonPath += uri.getPath() } } // Finally, update the Spark config to propagate PYTHONPATH to the AM and executors. if (pythonPath.nonEmpty) { val pythonPathList = (sys.env.get("PYTHONPATH") ++ pythonPath) env("PYTHONPATH") = (env.get("PYTHONPATH") ++ pythonPathList) .mkString(ApplicationConstants.CLASS_PATH_SEPARATOR) val pythonPathExecutorEnv = (sparkConf.getExecutorEnv.toMap.get("PYTHONPATH") ++ pythonPathList).mkString(ApplicationConstants.CLASS_PATH_SEPARATOR) sparkConf.setExecutorEnv("PYTHONPATH", pythonPathExecutorEnv) } if (isClusterMode) { // propagate PYSPARK_DRIVER_PYTHON and PYSPARK_PYTHON to driver in cluster mode Seq("PYSPARK_DRIVER_PYTHON", "PYSPARK_PYTHON").foreach { envname => if (!env.contains(envname)) { sys.env.get(envname).foreach(env(envname) = _) } } sys.env.get("PYTHONHASHSEED").foreach(env.put("PYTHONHASHSEED", _)) } sys.env.get(ENV_DIST_CLASSPATH).foreach { dcp => env(ENV_DIST_CLASSPATH) = dcp } env } /** * Set up a ContainerLaunchContext to launch our ApplicationMaster container. * This sets up the launch environment, java options, and the command for launching the AM. */ private def createContainerLaunchContext(newAppResponse: GetNewApplicationResponse) : ContainerLaunchContext = { logInfo("Setting up container launch context for our AM") val appId = newAppResponse.getApplicationId val pySparkArchives = if (sparkConf.get(IS_PYTHON_APP)) { findPySparkArchives() } else { Nil } val launchEnv = setupLaunchEnv(stagingDirPath, pySparkArchives) val localResources = prepareLocalResources(stagingDirPath, pySparkArchives) val amContainer = Records.newRecord(classOf[ContainerLaunchContext]) amContainer.setLocalResources(localResources.asJava) amContainer.setEnvironment(launchEnv.asJava) val javaOpts = ListBuffer[String]() // Set the environment variable through a command prefix // to append to the existing value of the variable var prefixEnv: Option[String] = None // Add Xmx for AM memory javaOpts += "-Xmx" + amMemory + "m" val tmpDir = new Path(Environment.PWD.$$(), YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR) javaOpts += "-Djava.io.tmpdir=" + tmpDir // TODO: Remove once cpuset version is pushed out. // The context is, default gc for server class machines ends up using all cores to do gc - // hence if there are multiple containers in same node, Spark GC affects all other containers' // performance (which can be that of other Spark containers) // Instead of using this, rely on cpusets by YARN to enforce "proper" Spark behavior in // multi-tenant environments. Not sure how default Java GC behaves if it is limited to subset // of cores on a node. 
val useConcurrentAndIncrementalGC = launchEnv.get("SPARK_USE_CONC_INCR_GC").exists(_.toBoolean) if (useConcurrentAndIncrementalGC) { // In our expts, using (default) throughput collector has severe perf ramifications in // multi-tenant machines javaOpts += "-XX:+UseConcMarkSweepGC" javaOpts += "-XX:MaxTenuringThreshold=31" javaOpts += "-XX:SurvivorRatio=8" javaOpts += "-XX:+CMSIncrementalMode" javaOpts += "-XX:+CMSIncrementalPacing" javaOpts += "-XX:CMSIncrementalDutyCycleMin=0" javaOpts += "-XX:CMSIncrementalDutyCycle=10" } // Include driver-specific java options if we are launching a driver if (isClusterMode) { sparkConf.get(DRIVER_JAVA_OPTIONS).foreach { opts => javaOpts ++= Utils.splitCommandString(opts) .map(Utils.substituteAppId(_, appId.toString)) .map(YarnSparkHadoopUtil.escapeForShell) } val libraryPaths = Seq(sparkConf.get(DRIVER_LIBRARY_PATH), sys.props.get("spark.driver.libraryPath")).flatten if (libraryPaths.nonEmpty) { prefixEnv = Some(createLibraryPathPrefix(libraryPaths.mkString(File.pathSeparator), sparkConf)) } if (sparkConf.get(AM_JAVA_OPTIONS).isDefined) { logWarning(s"${AM_JAVA_OPTIONS.key} will not take effect in cluster mode") } } else { // Validate and include yarn am specific java options in yarn-client mode. sparkConf.get(AM_JAVA_OPTIONS).foreach { opts => if (opts.contains("-Dspark")) { val msg = s"${AM_JAVA_OPTIONS.key} is not allowed to set Spark options (was '$opts')." throw new SparkException(msg) } if (opts.contains("-Xmx")) { val msg = s"${AM_JAVA_OPTIONS.key} is not allowed to specify max heap memory settings " + s"(was '$opts'). Use spark.yarn.am.memory instead." throw new SparkException(msg) } javaOpts ++= Utils.splitCommandString(opts) .map(Utils.substituteAppId(_, appId.toString)) .map(YarnSparkHadoopUtil.escapeForShell) } sparkConf.get(AM_LIBRARY_PATH).foreach { paths => prefixEnv = Some(createLibraryPathPrefix(paths, sparkConf)) } } // For log4j configuration to reference javaOpts += ("-Dspark.yarn.app.container.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR) val userClass = if (isClusterMode) { Seq("--class", YarnSparkHadoopUtil.escapeForShell(args.userClass)) } else { Nil } val userJar = if (args.userJar != null) { Seq("--jar", args.userJar) } else { Nil } val primaryPyFile = if (isClusterMode && args.primaryPyFile != null) { Seq("--primary-py-file", new Path(args.primaryPyFile).getName()) } else { Nil } val primaryRFile = if (args.primaryRFile != null) { Seq("--primary-r-file", args.primaryRFile) } else { Nil } val amClass = if (isClusterMode) { Utils.classForName("org.apache.spark.deploy.yarn.ApplicationMaster").getName } else { Utils.classForName("org.apache.spark.deploy.yarn.ExecutorLauncher").getName } if (args.primaryRFile != null && (args.primaryRFile.endsWith(".R") || args.primaryRFile.endsWith(".r"))) { args.userArgs = ArrayBuffer(args.primaryRFile) ++ args.userArgs } val userArgs = args.userArgs.flatMap { arg => Seq("--arg", YarnSparkHadoopUtil.escapeForShell(arg)) } val amArgs = Seq(amClass) ++ userClass ++ userJar ++ primaryPyFile ++ primaryRFile ++ userArgs ++ Seq("--properties-file", buildPath(Environment.PWD.$$(), LOCALIZED_CONF_DIR, SPARK_CONF_FILE)) ++ Seq("--dist-cache-conf", buildPath(Environment.PWD.$$(), LOCALIZED_CONF_DIR, DIST_CACHE_CONF_FILE)) // Command for the ApplicationMaster val commands = prefixEnv ++ Seq(Environment.JAVA_HOME.$$() + "/bin/java", "-server") ++ javaOpts ++ amArgs ++ Seq( "1>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout", "2>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr") // 
TODO: it would be nicer to just make sure there are no null commands here val printableCommands = commands.map(s => if (s == null) "null" else s).toList amContainer.setCommands(printableCommands.asJava) logDebug("===============================================================================") logDebug("YARN AM launch context:") logDebug(s" user class: ${Option(args.userClass).getOrElse("N/A")}") logDebug(" env:") if (log.isDebugEnabled) { Utils.redact(sparkConf, launchEnv.toSeq).foreach { case (k, v) => logDebug(s" $k -> $v") } } logDebug(" resources:") localResources.foreach { case (k, v) => logDebug(s" $k -> $v")} logDebug(" command:") logDebug(s" ${printableCommands.mkString(" ")}") logDebug("===============================================================================") // send the acl settings into YARN to control who has access via YARN interfaces val securityManager = new SecurityManager(sparkConf) amContainer.setApplicationACLs( YarnSparkHadoopUtil.getApplicationAclsForYarn(securityManager).asJava) setupSecurityToken(amContainer) amContainer } /** * Report the state of an application until it has exited, either successfully or * due to some failure, then return a pair of the yarn application state (FINISHED, FAILED, * KILLED, or RUNNING) and the final application state (UNDEFINED, SUCCEEDED, FAILED, * or KILLED). * * @param appId ID of the application to monitor. * @param returnOnRunning Whether to also return the application state when it is RUNNING. * @param logApplicationReport Whether to log details of the application report every iteration. * @param interval How often to poll the YARN RM for application status (in ms). * @return A pair of the yarn application state and the final application state. */ def monitorApplication( appId: ApplicationId, returnOnRunning: Boolean = false, logApplicationReport: Boolean = true, interval: Long = sparkConf.get(REPORT_INTERVAL)): YarnAppReport = { var lastState: YarnApplicationState = null while (true) { Thread.sleep(interval) val report: ApplicationReport = try { getApplicationReport(appId) } catch { case e: ApplicationNotFoundException => logError(s"Application $appId not found.") cleanupStagingDir() return YarnAppReport(YarnApplicationState.KILLED, FinalApplicationStatus.KILLED, None) case NonFatal(e) if !e.isInstanceOf[InterruptedIOException] => val msg = s"Failed to contact YARN for application $appId." 
logError(msg, e) // Don't necessarily clean up staging dir because status is unknown return YarnAppReport(YarnApplicationState.FAILED, FinalApplicationStatus.FAILED, Some(msg)) } val state = report.getYarnApplicationState if (logApplicationReport) { logInfo(s"Application report for $appId (state: $state)") // If DEBUG is enabled, log report details every iteration // Otherwise, log them every time the application changes state if (log.isDebugEnabled) { logDebug(formatReportDetails(report, getDriverLogsLink(report))) } else if (lastState != state) { logInfo(formatReportDetails(report, getDriverLogsLink(report))) } } if (lastState != state) { state match { case YarnApplicationState.RUNNING => reportLauncherState(SparkAppHandle.State.RUNNING) case YarnApplicationState.FINISHED => report.getFinalApplicationStatus match { case FinalApplicationStatus.FAILED => reportLauncherState(SparkAppHandle.State.FAILED) case FinalApplicationStatus.KILLED => reportLauncherState(SparkAppHandle.State.KILLED) case _ => reportLauncherState(SparkAppHandle.State.FINISHED) } case YarnApplicationState.FAILED => reportLauncherState(SparkAppHandle.State.FAILED) case YarnApplicationState.KILLED => reportLauncherState(SparkAppHandle.State.KILLED) case _ => } } if (state == YarnApplicationState.FINISHED || state == YarnApplicationState.FAILED || state == YarnApplicationState.KILLED) { cleanupStagingDir() return createAppReport(report) } if (returnOnRunning && state == YarnApplicationState.RUNNING) { return createAppReport(report) } if (state == YarnApplicationState.ACCEPTED && isClientUnmanagedAMEnabled && appMaster == null && report.getAMRMToken != null) { appMaster = startApplicationMasterService(report) } lastState = state } // Never reached, but keeps compiler happy throw new SparkException("While loop is depleted! This should never happen...") } private def startApplicationMasterService(report: ApplicationReport): ApplicationMaster = { // Add AMRMToken to establish connection between RM and AM val token = report.getAMRMToken val amRMToken: org.apache.hadoop.security.token.Token[AMRMTokenIdentifier] = new org.apache.hadoop.security.token.Token[AMRMTokenIdentifier]( token.getIdentifier().array(), token.getPassword().array, new Text(token.getKind()), new Text(token.getService())) val currentUGI = UserGroupInformation.getCurrentUser currentUGI.addToken(amRMToken) // Start Application Service in a separate thread and continue with application monitoring val appMaster = new ApplicationMaster( new ApplicationMasterArguments(Array.empty), sparkConf, hadoopConf) val amService = new Thread("Unmanaged Application Master Service") { override def run(): Unit = { appMaster.runUnmanaged(rpcEnv, report.getCurrentApplicationAttemptId, stagingDirPath, cachedResourcesConf) } } amService.setDaemon(true) amService.start() appMaster } /** * Format an application report and optionally, links to driver logs, in a human-friendly manner. * * @param report The application report from YARN. * @param driverLogsLinks A map of driver log files and their links. Keys are the file names * (e.g. `stdout`), and values are the links. If empty, nothing will be * printed. * @return Human-readable version of the input data. 
*/ private def formatReportDetails(report: ApplicationReport, driverLogsLinks: IMap[String, String]): String = { val details = Seq[(String, String)]( ("client token", getClientToken(report)), ("diagnostics", report.getDiagnostics), ("ApplicationMaster host", report.getHost), ("ApplicationMaster RPC port", report.getRpcPort.toString), ("queue", report.getQueue), ("start time", report.getStartTime.toString), ("final status", report.getFinalApplicationStatus.toString), ("tracking URL", report.getTrackingUrl), ("user", report.getUser) ) ++ driverLogsLinks.map { case (fname, link) => (s"Driver Logs ($fname)", link) } // Use a more loggable format if the value is null or empty details.map { case (k, v) => val newValue = Option(v).filter(_.nonEmpty).getOrElse("N/A") s"\n\t $k: $newValue" }.mkString("") } /** * Fetch links to the logs of the driver for the given application report. This requires * querying the ResourceManager via RPC. Returns an empty map if the links could not be fetched. * If this feature is disabled via [[CLIENT_INCLUDE_DRIVER_LOGS_LINK]], or if the application * report indicates that the driver container isn't currently running, an empty map is * returned immediately. */ private def getDriverLogsLink(appReport: ApplicationReport): IMap[String, String] = { if (!sparkConf.get(CLIENT_INCLUDE_DRIVER_LOGS_LINK) || appReport.getYarnApplicationState != YarnApplicationState.RUNNING) { return IMap.empty } try { Option(appReport.getCurrentApplicationAttemptId) .flatMap(attemptId => Option(yarnClient.getApplicationAttemptReport(attemptId))) .flatMap(attemptReport => Option(attemptReport.getAMContainerId)) .flatMap(amContainerId => Option(yarnClient.getContainerReport(amContainerId))) .flatMap(containerReport => Option(containerReport.getLogUrl)) .map(YarnContainerInfoHelper.getLogUrlsFromBaseUrl) .getOrElse(IMap.empty) } catch { case e: Exception => logWarning(s"Unable to get driver log links for $appId: $e") // Include the full stack trace only at DEBUG level to reduce verbosity logDebug(s"Unable to get driver log links for $appId", e) IMap.empty } } /** * Submit an application to the ResourceManager. * If spark.yarn.submit.waitAppCompletion is set to true, the client stays alive * reporting the application's status until the application has exited for any reason. * Otherwise, the client process will exit after submission. * If the application finishes with a failed, killed, or undefined status, * throw an appropriate SparkException. 
*/ def run(): Unit = { this.appId = submitApplication() if (!launcherBackend.isConnected() && fireAndForget) { val report = getApplicationReport(appId) val state = report.getYarnApplicationState logInfo(s"Application report for $appId (state: $state)") logInfo(formatReportDetails(report, getDriverLogsLink(report))) if (state == YarnApplicationState.FAILED || state == YarnApplicationState.KILLED) { throw new SparkException(s"Application $appId finished with status: $state") } } else { val YarnAppReport(appState, finalState, diags) = monitorApplication(appId) if (appState == YarnApplicationState.FAILED || finalState == FinalApplicationStatus.FAILED) { diags.foreach { err => logError(s"Application diagnostics message: $err") } throw new SparkException(s"Application $appId finished with failed status") } if (appState == YarnApplicationState.KILLED || finalState == FinalApplicationStatus.KILLED) { throw new SparkException(s"Application $appId is killed") } if (finalState == FinalApplicationStatus.UNDEFINED) { throw new SparkException(s"The final status of application $appId is undefined") } } } private def findPySparkArchives(): Seq[String] = { sys.env.get("PYSPARK_ARCHIVES_PATH") .map(_.split(",").toSeq) .getOrElse { val pyLibPath = Seq(sys.env("SPARK_HOME"), "python", "lib").mkString(File.separator) val pyArchivesFile = new File(pyLibPath, "pyspark.zip") require(pyArchivesFile.exists(), s"$pyArchivesFile not found; cannot run pyspark application in YARN mode.") val py4jFile = new File(pyLibPath, PythonUtils.PY4J_ZIP_NAME) require(py4jFile.exists(), s"$py4jFile not found; cannot run pyspark application in YARN mode.") Seq(pyArchivesFile.getAbsolutePath(), py4jFile.getAbsolutePath()) } } } private[spark] object Client extends Logging { // Alias for the user jar val APP_JAR_NAME: String = "__app__.jar" // Staging directory for any temporary jars or files val SPARK_STAGING: String = ".sparkStaging" // Staging directory is private! -> rwx-------- val STAGING_DIR_PERMISSION: FsPermission = FsPermission.createImmutable(Integer.parseInt("700", 8).toShort) // App files are world-wide readable and owner writable -> rw-r--r-- val APP_FILE_PERMISSION: FsPermission = FsPermission.createImmutable(Integer.parseInt("644", 8).toShort) // Distribution-defined classpath to add to processes val ENV_DIST_CLASSPATH = "SPARK_DIST_CLASSPATH" // Subdirectory where the user's Spark and Hadoop config files will be placed. val LOCALIZED_CONF_DIR = "__spark_conf__" // Subdirectory in the conf directory containing Hadoop config files. val LOCALIZED_HADOOP_CONF_DIR = "__hadoop_conf__" // File containing the conf archive in the AM. See prepareLocalResources(). val LOCALIZED_CONF_ARCHIVE = LOCALIZED_CONF_DIR + ".zip" // Name of the file in the conf archive containing Spark configuration. val SPARK_CONF_FILE = "__spark_conf__.properties" // Name of the file in the conf archive containing the distributed cache info. val DIST_CACHE_CONF_FILE = "__spark_dist_cache__.properties" // Subdirectory where the user's python files (not archives) will be placed. val LOCALIZED_PYTHON_DIR = "__pyfiles__" // Subdirectory where Spark libraries will be placed. val LOCALIZED_LIB_DIR = "__spark_libs__" /** * Return the path to the given application's staging directory. */ private def getAppStagingDir(appId: ApplicationId): String = { buildPath(SPARK_STAGING, appId.toString()) } /** * Populate the classpath entry in the given environment map with any application * classpath specified through the Hadoop and Yarn configurations. 
*/ private[yarn] def populateHadoopClasspath(conf: Configuration, env: HashMap[String, String]) : Unit = { val classPathElementsToAdd = getYarnAppClasspath(conf) ++ getMRAppClasspath(conf) classPathElementsToAdd.foreach { c => YarnSparkHadoopUtil.addPathToEnvironment(env, Environment.CLASSPATH.name, c.trim) } } private def getYarnAppClasspath(conf: Configuration): Seq[String] = Option(conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH)) match { case Some(s) => s.toSeq case None => getDefaultYarnApplicationClasspath } private def getMRAppClasspath(conf: Configuration): Seq[String] = Option(conf.getStrings("mapreduce.application.classpath")) match { case Some(s) => s.toSeq case None => getDefaultMRApplicationClasspath } private[yarn] def getDefaultYarnApplicationClasspath: Seq[String] = YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH.toSeq private[yarn] def getDefaultMRApplicationClasspath: Seq[String] = StringUtils.getStrings(MRJobConfig.DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH).toSeq /** * Populate the classpath entry in the given environment map. * * User jars are generally not added to the JVM's system classpath; those are handled by the AM * and executor backend. When the deprecated `spark.yarn.user.classpath.first` is used, user jars * are included in the system classpath, though. The extra class path and other uploaded files are * always made available through the system class path. * * @param args Client arguments (when starting the AM) or null (when starting executors). */ private[yarn] def populateClasspath( args: ClientArguments, conf: Configuration, sparkConf: SparkConf, env: HashMap[String, String], extraClassPath: Option[String] = None): Unit = { extraClassPath.foreach { cp => addClasspathEntry(getClusterPath(sparkConf, cp), env) } addClasspathEntry(Environment.PWD.$$(), env) addClasspathEntry(Environment.PWD.$$() + Path.SEPARATOR + LOCALIZED_CONF_DIR, env) if (sparkConf.get(USER_CLASS_PATH_FIRST)) { // in order to properly add the app jar when user classpath is first // we have to do the mainJar separate in order to send the right thing // into addFileToClasspath val mainJar = if (args != null) { getMainJarUri(Option(args.userJar)) } else { getMainJarUri(sparkConf.get(APP_JAR)) } mainJar.foreach(addFileToClasspath(sparkConf, conf, _, APP_JAR_NAME, env)) val secondaryJars = if (args != null) { getSecondaryJarUris(Option(sparkConf.get(JARS_TO_DISTRIBUTE))) } else { getSecondaryJarUris(sparkConf.get(SECONDARY_JARS)) } secondaryJars.foreach { x => addFileToClasspath(sparkConf, conf, x, null, env) } } // Add the Spark jars to the classpath, depending on how they were distributed. addClasspathEntry(buildPath(Environment.PWD.$$(), LOCALIZED_LIB_DIR, "*"), env) if (sparkConf.get(SPARK_ARCHIVE).isEmpty) { sparkConf.get(SPARK_JARS).foreach { jars => jars.filter(Utils.isLocalUri).foreach { jar => val uri = new URI(jar) addClasspathEntry(getClusterPath(sparkConf, uri.getPath()), env) } } } if (sparkConf.get(POPULATE_HADOOP_CLASSPATH)) { populateHadoopClasspath(conf, env) } sys.env.get(ENV_DIST_CLASSPATH).foreach { cp => addClasspathEntry(getClusterPath(sparkConf, cp), env) } // Add the localized Hadoop config at the end of the classpath, in case it contains other // files (such as configuration files for different services) that are not part of the // YARN cluster's config. addClasspathEntry( buildPath(Environment.PWD.$$(), LOCALIZED_CONF_DIR, LOCALIZED_HADOOP_CONF_DIR), env) } /** * Returns a list of URIs representing the user classpath. * * @param conf Spark configuration. 
*/ def getUserClasspath(conf: SparkConf): Array[URI] = { val mainUri = getMainJarUri(conf.get(APP_JAR)) val secondaryUris = getSecondaryJarUris(conf.get(SECONDARY_JARS)) (mainUri ++ secondaryUris).toArray } /** * Returns a list of local, absolute file URLs representing the user classpath. Note that this * must be executed on the same host which will access the URLs, as it will resolve relative * paths based on the current working directory. * * @param conf Spark configuration. * @param useClusterPath Whether to use the 'cluster' path when resolving paths with the * `local` scheme. This should be used when running on the cluster, but * not when running on the gateway (i.e. for the driver in `client` mode). * @return Array of local URLs ready to be passed to a [[java.net.URLClassLoader]]. */ def getUserClasspathUrls(conf: SparkConf, useClusterPath: Boolean): Array[URL] = { Client.getUserClasspath(conf).map { uri => val inputPath = uri.getPath val replacedFilePath = if (Utils.isLocalUri(uri.toString) && useClusterPath) { Client.getClusterPath(conf, inputPath) } else { // Any other URI schemes should have been resolved by this point assert(uri.getScheme == null || uri.getScheme == "file" || Utils.isLocalUri(uri.toString), "getUserClasspath should only return 'file' or 'local' URIs but found: " + uri) inputPath } Paths.get(replacedFilePath).toAbsolutePath.toUri.toURL } } private def getMainJarUri(mainJar: Option[String]): Option[URI] = { mainJar.flatMap { path => val uri = Utils.resolveURI(path) if (uri.getScheme == Utils.LOCAL_SCHEME) Some(uri) else None }.orElse(Some(new URI(APP_JAR_NAME))) } private def getSecondaryJarUris(secondaryJars: Option[Seq[String]]): Seq[URI] = { secondaryJars.getOrElse(Nil).map(new URI(_)) } /** * Adds the given path to the classpath, handling "local:" URIs correctly. * * If an alternate name for the file is given, and it's not a "local:" file, the alternate * name will be added to the classpath (relative to the job's work directory). * * If not a "local:" file and no alternate name, the linkName will be added to the classpath. * * @param conf Spark configuration. * @param hadoopConf Hadoop configuration. * @param uri URI to add to classpath (optional). * @param fileName Alternate name for the file (optional). * @param env Map holding the environment variables. */ private def addFileToClasspath( conf: SparkConf, hadoopConf: Configuration, uri: URI, fileName: String, env: HashMap[String, String]): Unit = { if (uri != null && uri.getScheme == Utils.LOCAL_SCHEME) { addClasspathEntry(getClusterPath(conf, uri.getPath), env) } else if (fileName != null) { addClasspathEntry(buildPath(Environment.PWD.$$(), fileName), env) } else if (uri != null) { val localPath = getQualifiedLocalPath(uri, hadoopConf) val linkName = Option(uri.getFragment()).getOrElse(localPath.getName()) addClasspathEntry(buildPath(Environment.PWD.$$(), linkName), env) } } /** * Add the given path to the classpath entry of the given environment map. * If the classpath is already set, this appends the new path to the existing classpath. */ private def addClasspathEntry(path: String, env: HashMap[String, String]): Unit = YarnSparkHadoopUtil.addPathToEnvironment(env, Environment.CLASSPATH.name, path) /** * Returns the path to be sent to the NM for a path that is valid on the gateway. * * This method uses two configuration values: * * - spark.yarn.config.gatewayPath: a string that identifies a portion of the input path that may * only be valid in the gateway node. 
* - spark.yarn.config.replacementPath: a string with which to replace the gateway path. This may * contain, for example, env variable references, which will be expanded by the NMs when * starting containers. * * If either config is not available, the input path is returned. */ def getClusterPath(conf: SparkConf, path: String): String = { val localPath = conf.get(GATEWAY_ROOT_PATH) val clusterPath = conf.get(REPLACEMENT_ROOT_PATH) if (localPath != null && clusterPath != null) { path.replace(localPath, clusterPath) } else { path } } /** * Return whether two URI represent file system are the same */ private[spark] def compareUri(srcUri: URI, dstUri: URI): Boolean = { if (srcUri.getScheme() == null || srcUri.getScheme() != dstUri.getScheme()) { return false } val srcAuthority = srcUri.getAuthority() val dstAuthority = dstUri.getAuthority() if (srcAuthority != null && !srcAuthority.equalsIgnoreCase(dstAuthority)) { return false } var srcHost = srcUri.getHost() var dstHost = dstUri.getHost() // In HA or when using viewfs, the host part of the URI may not actually be a host, but the // name of the HDFS namespace. Those names won't resolve, so avoid even trying if they // match. if (srcHost != null && dstHost != null && srcHost != dstHost) { try { srcHost = InetAddress.getByName(srcHost).getCanonicalHostName() dstHost = InetAddress.getByName(dstHost).getCanonicalHostName() } catch { case e: UnknownHostException => return false } } Objects.equal(srcHost, dstHost) && srcUri.getPort() == dstUri.getPort() } /** * Return whether the two file systems are the same. */ protected def compareFs(srcFs: FileSystem, destFs: FileSystem): Boolean = { val srcUri = srcFs.getUri() val dstUri = destFs.getUri() compareUri(srcUri, dstUri) } /** * Given a local URI, resolve it and return a qualified local path that corresponds to the URI. * This is used for preparing local resources to be included in the container launch context. */ private def getQualifiedLocalPath(localURI: URI, hadoopConf: Configuration): Path = { val qualifiedURI = if (localURI.getScheme == null) { // If not specified, assume this is in the local filesystem to keep the behavior // consistent with that of Hadoop new URI(FileSystem.getLocal(hadoopConf).makeQualified(new Path(localURI)).toString) } else { localURI } new Path(qualifiedURI) } /** * Whether to consider jars provided by the user to have precedence over the Spark jars when * loading user classes. */ def isUserClassPathFirst(conf: SparkConf, isDriver: Boolean): Boolean = { if (isDriver) { conf.get(DRIVER_USER_CLASS_PATH_FIRST) } else { conf.get(EXECUTOR_USER_CLASS_PATH_FIRST) } } /** * Joins all the path components using Path.SEPARATOR. */ def buildPath(components: String*): String = { components.mkString(Path.SEPARATOR) } def createAppReport(report: ApplicationReport): YarnAppReport = { val diags = report.getDiagnostics() val diagsOpt = if (diags != null && diags.nonEmpty) Some(diags) else None YarnAppReport(report.getYarnApplicationState(), report.getFinalApplicationStatus(), diagsOpt) } /** * Create a properly quoted and escaped library path string to be added as a prefix to the command * executed by YARN. This is different from normal quoting / escaping due to YARN executing the * command through "bash -c". 
*/ def createLibraryPathPrefix(libpath: String, conf: SparkConf): String = { val cmdPrefix = if (Utils.isWindows) { Utils.libraryPathEnvPrefix(Seq(libpath)) } else { val envName = Utils.libraryPathEnvName // For quotes, escape both the quote and the escape character when encoding in the command // string. val quoted = libpath.replace("\"", "\\\"") envName + "=\\\"" + quoted + File.pathSeparator + "$" + envName + "\\\"" } getClusterPath(conf, cmdPrefix) } def confToProperties(conf: SparkConf): Properties = { val props = new Properties() conf.getAll.foreach { case (k, v) => props.setProperty(k, v) } props } def writePropertiesToArchive(props: Properties, name: String, out: ZipOutputStream): Unit = { out.putNextEntry(new ZipEntry(name)) val writer = new OutputStreamWriter(out, StandardCharsets.UTF_8) props.store(writer, "Spark configuration.") writer.flush() out.closeEntry() } } private[spark] class YarnClusterApplication extends SparkApplication { override def start(args: Array[String], conf: SparkConf): Unit = { // SparkSubmit would use the YARN cache to distribute files & jars in yarn mode, // so remove them from sparkConf here; the YARN client handles their distribution instead. conf.remove(JARS) conf.remove(FILES) conf.remove(ARCHIVES) new Client(new ClientArguments(args), conf, null).run() } } private[spark] case class YarnAppReport( appState: YarnApplicationState, finalState: FinalApplicationStatus, diagnostics: Option[String])
wangmiao1981/spark
resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
Scala
apache-2.0
71,946
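The gateway-path rewriting documented for Client.getClusterPath above boils down to a plain string replacement that only happens when both spark.yarn.config.gatewayPath and spark.yarn.config.replacementPath are set. A minimal standalone sketch of just that substitution logic (the object and parameter names here are illustrative; this is not the Spark API, only the string-level behaviour it describes):

object ClusterPathSketch {
  // Mirrors Client.getClusterPath: rewrite only if both configs are present, else pass through.
  def getClusterPath(gatewayPath: Option[String], replacementPath: Option[String], path: String): String =
    (gatewayPath, replacementPath) match {
      case (Some(local), Some(cluster)) => path.replace(local, cluster)
      case _                            => path
    }

  def main(args: Array[String]): Unit = {
    val rewritten = getClusterPath(Some("/opt/gateway/spark"), Some("{{SPARK_LIBS}}"), "/opt/gateway/spark/jars/foo.jar")
    println(rewritten)                                              // {{SPARK_LIBS}}/jars/foo.jar
    println(getClusterPath(None, Some("{{SPARK_LIBS}}"), "/tmp/x")) // /tmp/x — one config missing, path unchanged
  }
}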
/* * Accio is a platform to launch computer science experiments. * Copyright (C) 2016-2018 Vincent Primault <[email protected]> * * Accio is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Accio is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Accio. If not, see <http://www.gnu.org/licenses/>. */ package fr.cnrs.liris.locapriv.ops import fr.cnrs.liris.lumos.domain.RemoteFile import fr.cnrs.liris.accio.sdk._ import fr.cnrs.liris.locapriv.domain.Event @Op( category = "transform", help = "Enforce a given size on each trace.", description = "Larger traces will be truncated, smaller traces will be discarded.") case class EnforceSizeOp( @Arg(help = "Minimum number of events in each trace") minSize: Option[Int], @Arg(help = "Maximum number of events in each trace") maxSize: Option[Int], @Arg(help = "Input dataset") data: RemoteFile) extends TransformOp[Event] { override protected def transform(key: String, trace: Iterable[Event]): Iterable[Event] = { var result = trace maxSize.foreach { size => if (result.size > size) { result = result.take(size) } } minSize match { case None => result case Some(size) => if (result.size < size) Seq.empty else result } } }
privamov/accio
accio/java/fr/cnrs/liris/locapriv/ops/EnforceSizeOp.scala
Scala
gpl-3.0
1,711
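EnforceSizeOp above first truncates a trace to maxSize and then discards it entirely if it is still shorter than minSize. The same policy, sketched on a plain Seq so it can run without the Accio/locapriv types (Event, RemoteFile and the @Op machinery are dropped; the names below are illustrative):

object EnforceSizeSketch {
  def enforceSize[A](trace: Seq[A], minSize: Option[Int], maxSize: Option[Int]): Seq[A] = {
    // Truncate larger traces first, as the operator does.
    val truncated = maxSize.fold(trace)(size => trace.take(size))
    // Then discard the whole trace if it is still below the minimum size.
    minSize match {
      case Some(size) if truncated.size < size => Seq.empty
      case _                                   => truncated
    }
  }

  def main(args: Array[String]): Unit = {
    println(enforceSize(Seq(1, 2, 3, 4, 5, 6), minSize = Some(3), maxSize = Some(4))) // List(1, 2, 3, 4)
    println(enforceSize(Seq(1, 2), minSize = Some(3), maxSize = Some(4)))             // List() — discarded
  }
}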
package models.job import java.sql.Timestamp import akka.actor.ActorSystem import com.scalableminds.util.accesscontext.{DBAccessContext, GlobalAccessContext} import com.scalableminds.util.geometry.BoundingBox import com.scalableminds.util.mvc.Formatter import com.scalableminds.util.tools.{Fox, FoxImplicits} import com.scalableminds.webknossos.schema.Tables._ import com.typesafe.scalalogging.LazyLogging import javax.inject.Inject import models.analytics.{AnalyticsService, FailedJobEvent, RunJobEvent} import models.binary.{DataSetDAO, DataStoreDAO} import models.job.JobState.JobState import models.organization.OrganizationDAO import models.user.{MultiUserDAO, User, UserDAO} import oxalis.telemetry.SlackNotificationService import play.api.inject.ApplicationLifecycle import play.api.libs.json.{JsObject, Json} import slick.jdbc.PostgresProfile.api._ import slick.jdbc.TransactionIsolation.Serializable import slick.lifted.Rep import utils.{ObjectId, SQLClient, SQLDAO, WkConf} import scala.concurrent.ExecutionContext import scala.concurrent.duration.FiniteDuration import scala.concurrent.duration._ case class Job( _id: ObjectId, _owner: ObjectId, _dataStore: String, command: String, commandArgs: JsObject = Json.obj(), state: JobState = JobState.PENDING, manualState: Option[JobState] = None, _worker: Option[ObjectId] = None, latestRunId: Option[String] = None, returnValue: Option[String] = None, started: Option[Long] = None, ended: Option[Long] = None, created: Long = System.currentTimeMillis(), isDeleted: Boolean = false ) { def isEnded: Boolean = { val relevantState = manualState.getOrElse(state) relevantState == JobState.SUCCESS || state == JobState.FAILURE } def duration: Option[FiniteDuration] = for { e <- ended s <- started } yield (e - s).millis def effectiveState: JobState = manualState.getOrElse(state) def exportFileName: Option[String] = argAsStringOpt("export_file_name") def datasetName: Option[String] = argAsStringOpt("dataset_name") private def argAsStringOpt(key: String) = (commandArgs \\ key).toOption.flatMap(_.asOpt[String]) def resultLink(organizationName: String, dataStorePublicUrl: String): Option[String] = if (effectiveState != JobState.SUCCESS) None else { command match { case "convert_to_wkw" => datasetName.map { dsName => s"/datasets/$organizationName/$dsName/view" } case "export_tiff" => Some(s"$dataStorePublicUrl/data/exports/${_id.id}/download") case "infer_nuclei" | "infer_neurons" => returnValue.map { resultDatasetName => s"/datasets/$organizationName/$resultDatasetName/view" } case _ => None } } } class JobDAO @Inject()(sqlClient: SQLClient)(implicit ec: ExecutionContext) extends SQLDAO[Job, JobsRow, Jobs](sqlClient) { val collection = Jobs def idColumn(x: Jobs): Rep[String] = x._Id def isDeletedColumn(x: Jobs): Rep[Boolean] = x.isdeleted def parse(r: JobsRow): Fox[Job] = for { manualStateOpt <- Fox.runOptional(r.manualstate)(JobState.fromString) state <- JobState.fromString(r.state) } yield { Job( ObjectId(r._Id), ObjectId(r._Owner), r._Datastore.trim, r.command, Json.parse(r.commandargs).as[JsObject], state, manualStateOpt, r._Worker.map(ObjectId(_)), r.latestrunid, r.returnvalue, r.started.map(_.getTime), r.ended.map(_.getTime), r.created.getTime, r.isdeleted ) } override def readAccessQ(requestingUserId: ObjectId) = s"""_owner = '$requestingUserId'""" override def findAll(implicit ctx: DBAccessContext): Fox[List[Job]] = for { accessQuery <- readAccessQuery r <- run(sql"select #$columns from #$existingCollectionName where #$accessQuery order by created".as[JobsRow]) 
parsed <- parseAll(r) } yield parsed override def findOne(jobId: ObjectId)(implicit ctx: DBAccessContext): Fox[Job] = for { accessQuery <- readAccessQuery r <- run(sql"select #$columns from #$existingCollectionName where #$accessQuery and _id = $jobId".as[JobsRow]) parsed <- parseFirst(r, jobId) } yield parsed def countUnassignedPendingForDataStore(_dataStore: String): Fox[Int] = for { r <- run(sql"""select count(_id) from #$existingCollectionName where state = '#${JobState.PENDING}' and manualState is null and _dataStore = ${_dataStore} and _worker is null""".as[Int]) head <- r.headOption } yield head def countUnfinishedByWorker(workerId: ObjectId): Fox[Int] = for { r <- run( sql"select count(_id) from #$existingCollectionName where _worker = $workerId and state in ('#${JobState.PENDING}', '#${JobState.STARTED}') and manualState is null" .as[Int]) head <- r.headOption } yield head def findAllUnfinishedByWorker(workerId: ObjectId): Fox[List[Job]] = for { r <- run( sql"select #$columns from #$existingCollectionName where _worker = $workerId and state in ('#${JobState.PENDING}', '#${JobState.STARTED}') and manualState is null order by created" .as[JobsRow]) parsed <- parseAll(r) } yield parsed /* * Jobs that are cancelled by the user (manualState set to cancelled) * but not yet cancelled in the worker (state not yet set to cancelled) * are sent to the worker in to_cancel list. These are gathered here. * Compare the note on the job cancelling protocol in JobsController */ def findAllCancellingByWorker(workerId: ObjectId): Fox[List[Job]] = for { r <- run( sql"select #$columns from #$existingCollectionName where _worker = $workerId and state != '#${JobState.CANCELLED}' and manualState = '#${JobState.CANCELLED}'" .as[JobsRow]) parsed <- parseAll(r) } yield parsed def isOwnedBy(_id: String, _user: ObjectId): Fox[Boolean] = for { results: Seq[String] <- run( sql"select _id from #$existingCollectionName where _id = ${_id} and _owner = ${_user}".as[String]) } yield results.nonEmpty def insertOne(j: Job): Fox[Unit] = for { _ <- run( sqlu"""insert into webknossos.jobs(_id, _owner, _dataStore, command, commandArgs, state, manualState, _worker, latestRunId, returnValue, started, ended, created, isDeleted) values(${j._id}, ${j._owner}, ${j._dataStore}, ${j.command}, '#${sanitize( j.commandArgs.toString)}', '#${j.state.toString}', #${optionLiteralSanitized(j.manualState.map(_.toString))}, #${optionLiteral(j._worker.map(_.toString))}, #${optionLiteralSanitized(j.latestRunId)}, #${optionLiteralSanitized(j.returnValue)}, #${optionLiteral(j.started.map(_.toString))}, #${optionLiteral(j.ended.map(_.toString))}, ${new java.sql.Timestamp(j.created)}, ${j.isDeleted})""") } yield () def updateManualState(id: ObjectId, manualState: JobState)(implicit ctx: DBAccessContext): Fox[Unit] = for { _ <- assertUpdateAccess(id) _ <- run(sqlu"""update webknossos.jobs set manualState = '#${manualState.toString}' where _id = $id""") } yield () def updateStatus(jobId: ObjectId, s: JobStatus): Fox[Unit] = { val format = new java.text.SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSX") val startedTimestamp = s.started.map(started => format.format(new Timestamp(started))) val endedTimestamp = s.ended.map(ended => format.format(new Timestamp(ended))) for { _ <- run(sqlu"""update webknossos.jobs set latestRunId = #${optionLiteralSanitized(s.latestRunId)}, state = '#${s.state.toString}', returnValue = #${optionLiteralSanitized(s.returnValue)}, started = #${optionLiteralSanitized(startedTimestamp)}, ended = 
#${optionLiteralSanitized(endedTimestamp)} where _id = $jobId""") } yield () } def reserveNextJob(worker: Worker): Fox[Unit] = { val query = sqlu""" with subquery as ( select _id from webknossos.jobs_ where state = '#${JobState.PENDING}' and _dataStore = ${worker._dataStore} and manualState is NULL and _worker is NULL order by created limit 1 ) update webknossos.jobs_ j set _worker = ${worker._id} from subquery where j._id = subquery._id """ for { _ <- run( query.withTransactionIsolation(Serializable), retryCount = 50, retryIfErrorContains = List(transactionSerializationError) ) } yield () } def countByState: Fox[Map[String, Int]] = for { result <- run(sql"""select state, count(_id) from webknossos.jobs_ where manualState is null group by state order by state """.as[(String, Int)]) } yield result.toMap } class JobService @Inject()(wkConf: WkConf, userDAO: UserDAO, multiUserDAO: MultiUserDAO, jobDAO: JobDAO, dataStoreDAO: DataStoreDAO, organizationDAO: OrganizationDAO, dataSetDAO: DataSetDAO, analyticsService: AnalyticsService, slackNotificationService: SlackNotificationService, val lifecycle: ApplicationLifecycle, val system: ActorSystem)(implicit ec: ExecutionContext) extends FoxImplicits with LazyLogging with Formatter { def trackStatusChange(jobBeforeChange: Job, jobAfterChange: Job): Unit = { if (jobBeforeChange.isEnded) return if (jobAfterChange.state == JobState.SUCCESS) trackNewlySuccessful(jobBeforeChange, jobAfterChange) if (jobAfterChange.state == JobState.FAILURE) trackNewlyFailed(jobBeforeChange, jobAfterChange) } private def trackNewlyFailed(jobBeforeChange: Job, jobAfterChange: Job): Unit = { for { user <- userDAO.findOne(jobBeforeChange._owner)(GlobalAccessContext) multiUser <- multiUserDAO.findOne(user._multiUser)(GlobalAccessContext) organization <- organizationDAO.findOne(user._organization)(GlobalAccessContext) superUserLabel = if (multiUser.isSuperUser) " (for superuser)" else "" durationLabel = jobAfterChange.duration.map(d => s" after ${formatDuration(d)}").getOrElse("") _ = analyticsService.track(FailedJobEvent(user, jobBeforeChange.command)) msg = s"Job ${jobBeforeChange._id} failed$durationLabel. Command ${jobBeforeChange.command}, organization name: ${organization.name}." _ = logger.warn(msg) _ = slackNotificationService.warn( s"Failed job$superUserLabel", msg ) } yield () () } private def trackNewlySuccessful(jobBeforeChange: Job, jobAfterChange: Job): Unit = { for { user <- userDAO.findOne(jobBeforeChange._owner)(GlobalAccessContext) organization <- organizationDAO.findOne(user._organization)(GlobalAccessContext) dataStore <- dataStoreDAO.findOneByName(jobBeforeChange._dataStore)(GlobalAccessContext) resultLink = jobAfterChange.resultLink(organization.name, dataStore.publicUrl) resultLinkMrkdwn = resultLink.map(l => s" <${wkConf.Http.uri}$l|Result>").getOrElse("") multiUser <- multiUserDAO.findOne(user._multiUser)(GlobalAccessContext) superUserLabel = if (multiUser.isSuperUser) " (for superuser)" else "" durationLabel = jobAfterChange.duration.map(d => s" after ${formatDuration(d)}").getOrElse("") msg = s"Job ${jobBeforeChange._id} succeeded$durationLabel. Command ${jobBeforeChange.command}, organization name: ${organization.name}.$resultLinkMrkdwn" _ = logger.info(msg) _ = slackNotificationService.success( s"Successful job$superUserLabel", msg ) } yield () () } def cleanUpIfFailed(job: Job): Fox[Unit] = if (job.state == JobState.FAILURE && job.command == "convert_to_wkw") { logger.info(s"WKW conversion job ${job._id} failed. 
Deleting dataset from the database, freeing the name...") val commandArgs = job.commandArgs.value for { datasetName <- commandArgs.get("dataset_name").map(_.as[String]).toFox organizationName <- commandArgs.get("organization_name").map(_.as[String]).toFox dataset <- dataSetDAO.findOneByNameAndOrganizationName(datasetName, organizationName)(GlobalAccessContext) _ <- dataSetDAO.deleteDataset(dataset._id) } yield () } else Fox.successful(()) def publicWrites(job: Job)(implicit ctx: DBAccessContext): Fox[JsObject] = for { owner <- userDAO.findOne(job._owner) ?~> "user.notFound" organization <- organizationDAO.findOne(owner._organization) ?~> "organization.notFound" dataStore <- dataStoreDAO.findOneByName(job._dataStore) ?~> "dataStore.notFound" resultLink = job.resultLink(organization.name, dataStore.publicUrl) } yield { Json.obj( "id" -> job._id.id, "command" -> job.command, "commandArgs" -> (job.commandArgs - "webknossos_token" - "user_auth_token"), "state" -> job.state, "manualState" -> job.manualState, "latestRunId" -> job.latestRunId, "returnValue" -> job.returnValue, "resultLink" -> resultLink, "created" -> job.created, "started" -> job.started, "ended" -> job.ended, ) } def parameterWrites(job: Job): JsObject = Json.obj( "job_id" -> job._id.id, "command" -> job.command, "job_kwargs" -> job.commandArgs ) def submitJob(command: String, commandArgs: JsObject, owner: User, dataStoreName: String): Fox[Job] = for { _ <- bool2Fox(wkConf.Features.jobsEnabled) ?~> "job.disabled" job = Job(ObjectId.generate, owner._id, dataStoreName, command, commandArgs) _ <- jobDAO.insertOne(job) _ = analyticsService.track(RunJobEvent(owner, command)) } yield job def assertTiffExportBoundingBoxLimits(bbox: String): Fox[Unit] = for { boundingBox <- BoundingBox.createFrom(bbox).toFox ?~> "job.export.tiff.invalidBoundingBox" _ <- bool2Fox(boundingBox.volume <= wkConf.Features.exportTiffMaxVolumeMVx * 1024 * 1024) ?~> "job.export.tiff.volumeExceeded" _ <- bool2Fox(boundingBox.dimensions.maxDim <= wkConf.Features.exportTiffMaxEdgeLengthVx) ?~> "job.export.tiff.edgeLengthExceeded" } yield () }
scalableminds/webknossos
app/models/job/Job.scala
Scala
agpl-3.0
14,998
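Two small pieces of logic in the Job model above are easy to isolate: effectiveState prefers a manually set state over the worker-reported one, and duration is only defined once both timestamps exist. A reduced sketch with a stand-in enumeration (MiniJob and this JobState are inventions for illustration, not the webknossos model or its Fox/SQL layer):

object JobStateSketch {
  import scala.concurrent.duration._

  object JobState extends Enumeration { val PENDING, STARTED, SUCCESS, FAILURE = Value }

  case class MiniJob(state: JobState.Value,
                     manualState: Option[JobState.Value] = None,
                     started: Option[Long] = None,
                     ended: Option[Long] = None) {
    // A manually set state overrides the state reported by the worker.
    def effectiveState: JobState.Value = manualState.getOrElse(state)
    // Duration is only defined once both timestamps are present.
    def duration: Option[FiniteDuration] =
      for { e <- ended; s <- started } yield (e - s).millis
  }

  def main(args: Array[String]): Unit = {
    val j = MiniJob(JobState.STARTED, manualState = Some(JobState.FAILURE), started = Some(1000L), ended = Some(4000L))
    println(j.effectiveState) // FAILURE — the manual state wins
    println(j.duration)       // Some(3000 milliseconds)
  }
}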
/* * Copyright 2018 CJWW Development * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package utils trait IntegrationStubbing { self: IntegrationSpec => class PreconditionBuilder { implicit val builder: PreconditionBuilder = this def currentUsers: CurrentUsers = CurrentUsers() def individualUser: IndividualUser = IndividualUser() def organisationUser: OrganisationUser = OrganisationUser() } def given: PreconditionBuilder = new PreconditionBuilder case class CurrentUsers()(implicit builder: PreconditionBuilder) { def hasCurrentUser: PreconditionBuilder = { await(contextRepository.collection.flatMap(_.insert(testCurrentUser))) builder } } case class IndividualUser()(implicit builder: PreconditionBuilder) { def isSetup: PreconditionBuilder = { await(loginRepository.collection.flatMap(_.insert(testAccount))) builder } } case class OrganisationUser()(implicit builder: PreconditionBuilder) { def isSetup: PreconditionBuilder = { await(orgLoginRepository.collection.flatMap(_.insert(testOrgAccount))) builder } } }
cjww-development/auth-microservice
it/utils/IntegrationStubbing.scala
Scala
apache-2.0
1,651
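The PreconditionBuilder in IntegrationStubbing above exists so a test can seed repositories with a fluent chain before hitting an endpoint; every step performs an insert and hands the builder back. A hypothetical spec using it might read like the sketch below (the spec name, the ScalaTest-style should/in wording and the endpoint comment are assumptions, not taken from the real suite):

class LoginISpec extends IntegrationSpec with IntegrationStubbing {

  "a protected route" should {
    "see the fixtures inserted through the precondition builder" in {
      // Each step awaits an insert and returns the builder, so preconditions chain left to right.
      given.individualUser.isSetup
        .currentUsers.hasCurrentUser
        .organisationUser.isSetup

      // ... call the endpoint under test and assert on the response here ...
    }
  }
}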
package com.equalinformation.poc.akka import org.junit._ import Assert._ @Test class AppTest { @Test def testOK() = assertTrue(true) // @Test // def testKO() = assertTrue(false) }
bpupadhyaya/AkkaPOCMaven
src/test/scala/com/equalinformation/poc/akka/AppTest.scala
Scala
mit
201
package scscene import java.awt.{ Paint, Font, Composite, Shape, Graphics2D } import scgeom._ case class TextFigure( clip:Option[Clip], transform:SgAffineTransform, composite:Composite, paint:Paint, font:Font, text:String ) extends Figure { private val layout = ShapeUtil textLayout (text, font) private val origin = SgPoint fromPoint2D (ShapeUtil textOrigin layout) lazy val globalBounds:SgRectangle = SgRectangle fromRectangle2D (ShapeUtil inflate (globalShape.getBounds2D, 1)) final def globalPicked(at:SgPoint):Boolean = (clip forall { _ globalPicked at }) && (globalShape contains at.toPoint2D) private lazy val globalShape:Shape = { val otrans = SgAffineTransform translate -origin val shape = layout getOutline otrans.delegate transform transformShape shape } def paintImpl(g:Graphics2D) { for (c <- clip) { g clip c.globalShape } g transform transform.toAffineTransform g setComposite composite g setPaint paint g setFont font g drawString( text, -origin.x.toFloat, -origin.y.toFloat) } }
ritschwumm/scscene
src/main/scala/scscene/TextFigure.scala
Scala
bsd-2-clause
1,079
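TextFigure above leans on a ShapeUtil helper (not part of this file) to turn the text into a TextLayout and an outline Shape. Using only the standard java.awt text API, obtaining such an outline looks roughly like this — a guess at what the helper wraps, not its actual implementation:

import java.awt.Font
import java.awt.font.{FontRenderContext, TextLayout}
import java.awt.geom.AffineTransform

object TextOutlineSketch {
  def main(args: Array[String]): Unit = {
    val font = new Font(Font.SANS_SERIF, Font.PLAIN, 24)
    // Antialiased, fractional-metrics render context; no device transform.
    val frc = new FontRenderContext(null, true, true)
    val layout = new TextLayout("hello", font, frc)

    // The outline is a java.awt.Shape; shift it so its bounds start near the origin,
    // similar in spirit to the origin correction TextFigure applies before painting.
    val bounds = layout.getBounds
    val outline = layout.getOutline(AffineTransform.getTranslateInstance(-bounds.getX, -bounds.getY))

    println(outline.getBounds2D) // bounding box of the glyph shapes, now anchored near (0, 0)
  }
}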
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.sysml.api.ml.serving import org.apache.sysml.runtime.matrix.data.MatrixBlock object BatchingUtils { def batchRequests(requests: Array[SchedulingRequest]) : MatrixBlock = { if (requests.length == 1) { return requests(0).request.data } val ncol = requests(0).request.data.getNumColumns val res = new MatrixBlock(requests.length, ncol, -1).allocateDenseBlock() val doubles = res.getDenseBlockValues var start = 0 for (req <- requests) { System.arraycopy(req.request.data.getDenseBlockValues, 0, doubles, start, ncol) start += ncol } res.setNonZeros(-1) res } def unbatchRequests(requests: Array[SchedulingRequest], batchedResults: MatrixBlock) : Array[PredictionResponse] = { var responses = Array[PredictionResponse]() val start = 0 for (req <- requests) { val unbatchStart = System.nanoTime() val resp = PredictionResponse(batchedResults.slice( start, (start + req.request.requestSize)-1), batchedResults.getNumRows, req.statistics) val unbatchingTime = System.nanoTime() - unbatchStart if (req.statistics != null) req.statistics.unbatchingTime = unbatchingTime responses :+= resp } responses } }
niketanpansare/incubator-systemml
src/main/scala/org/apache/sysml/api/ml/serving/BatchingUtils.scala
Scala
apache-2.0
2,355
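Batching in BatchingUtils above is row-major concatenation of equally wide dense blocks, and unbatching is slicing rows back out. With plain Array[Double] standing in for the SystemML MatrixBlock dense values, the bookkeeping reduces to the sketch below (a simplification for illustration, not the SystemML code path):

object BatchingSketch {
  // Concatenate several single-row dense blocks of width ncol into one (requests.length x ncol) block.
  def batch(requests: Array[Array[Double]], ncol: Int): Array[Double] = {
    val out = new Array[Double](requests.length * ncol)
    var start = 0
    for (row <- requests) {
      System.arraycopy(row, 0, out, start, ncol)
      start += ncol
    }
    out
  }

  // Slice row i back out of the batched block.
  def unbatch(batched: Array[Double], ncol: Int, i: Int): Array[Double] =
    batched.slice(i * ncol, (i + 1) * ncol)

  def main(args: Array[String]): Unit = {
    val batched = batch(Array(Array(1.0, 2.0), Array(3.0, 4.0)), ncol = 2)
    println(batched.toVector)                // Vector(1.0, 2.0, 3.0, 4.0)
    println(unbatch(batched, 2, 1).toVector) // Vector(3.0, 4.0)
  }
}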
val b = new { val a = Vector(3,6,7) } val c = b a 2 /*start*/c/*end*/ //Int
LPTK/intellij-scala
testdata/typeInference/bugs5/SCL4482.scala
Scala
apache-2.0
75
package org.jetbrains.plugins.scala package codeInspection package packageNameInspection import com.intellij.codeInspection._ import com.intellij.openapi.project.Project import com.intellij.psi.PsiFile import org.jetbrains.plugins.scala.extensions.ObjectExt import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile import org.jetbrains.plugins.scala.settings.ScalaProjectSettings import scala.collection.JavaConverters._ class ChainedPackageInspection extends LocalInspectionTool { override def isEnabledByDefault = true override def getID = "ScalaChainedPackageClause" // TODO support multiple base packages simultaneously override def checkFile(file: PsiFile, manager: InspectionManager, isOnTheFly: Boolean) = { val problems = file.asOptionOf[ScalaFile].filter(!_.isScriptFile()).flatMap { scalaFile => scalaFile.getPackagings.headOption.flatMap { firstPackaging => val basePackages = ScalaProjectSettings.getInstance(file.getProject).getBasePackages.asScala basePackages.find(basePackage => firstPackaging.getPackageName != basePackage && firstPackaging.getPackageName.startsWith(basePackage)).flatMap { basePackage => firstPackaging.reference.map(_.getTextRange).map { range => manager.createProblemDescriptor(file, range, "Package declaration could use chained package clauses", ProblemHighlightType.WEAK_WARNING, false, new UseChainedPackageQuickFix(scalaFile, basePackage)) } } } } problems.toArray } } class UseChainedPackageQuickFix(myFile: ScalaFile, basePackage: String) extends AbstractFixOnPsiElement(s"Use chained package clauses: package $basePackage; package ...", myFile) { def doApplyFix(project: Project) { val file = getElement if (file.isValid) file.setPackageName(file.packageName) } override def getFamilyName = "Use chained package clauses" }
whorbowicz/intellij-scala
src/org/jetbrains/plugins/scala/codeInspection/packageNameInspection/ChainedPackageInspection.scala
Scala
apache-2.0
1,921
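ChainedPackageInspection above flags a flat package declaration whose prefix matches a configured base package and offers to rewrite it as chained package clauses. In plain Scala the two forms the quick fix switches between look like this (com.example is an assumed base package, used purely for illustration):

// Before the fix: a single flat clause.
// package com.example.util.io

// After the fix: chained clauses relative to the base package. Members of com.example and
// com.example.util are now visible without imports, which is why base packages are configured.
package com.example
package util.io

object PathHelpers {
  def normalize(p: String): String = p.stripSuffix("/")
}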
// Copyright 2014 Commonwealth Bank of Australia // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package au.com.cba.omnia.maestro.macros import scalaz._, Scalaz._ import au.com.cba.omnia.maestro.test.Spec import au.com.cba.omnia.maestro.test.thrift.humbug.{Types => HTypes} import au.com.cba.omnia.maestro.test.thrift.scrooge.{Types => STypes} object TagMacroSpec extends Spec { def is = s2""" TagMacro =========== The tag macro creates tag with the names of the thrift fields for humbug $tagHumbug with the names of the thrift fields for scrooge $tagScrooge """ def tagHumbug = { //given val typesList = List("stringField", "booleanField", "intField", "longField", "doubleField", "optIntField", "optStringField") //when val humbugTag = Macros.mkTag[HTypes] //then val humbugFields = humbugTag.run(List("1", "2", "3", "4", "5", "6", "7")) humbugFields.map(_.map({ case (_, field) => field.name })) === typesList.right } def tagScrooge = { //given val typesList = List("stringField", "booleanField", "intField", "longField", "doubleField", "optIntField", "optStringField") //when val scroogeTag = Macros.mkTag[STypes] //then val scroogeFields = scroogeTag.run(List("1", "2", "3", "4", "5", "6", "7")) scroogeFields.map(_.map({ case (_, field) => field.name })) === typesList.right } }
toddmowen/maestro
maestro-macros/src/test/scala/au/com/cba/omnia/maestro/macros/TagMacroSpec.scala
Scala
apache-2.0
1,895
import scalajs.js
import angulate2._

//@Injectable
class FriendsService {
  val names = js.Array("Aarav","Martín","Shannon","Ariana","Kai")
}

@Component(
  selector = "display",
  template = """<p>My name: {{ myName }}</p>
    <p>Friends:</p>
    <ul>
      <li *ngFor="let name of names">{{name}}</li>
    </ul>
    <p *ngIf="names.length > 3">You have many friends!</p>
    """,
  directives = js.Array( ng.common.NgFor, ng.common.NgIf )
)
class DisplayApp {
  val myName = "Alice"
  val names = js.Array("Aarav","Martín","Shannon","Ariana","Kai")
  //def names = friends.names
}
jokade/angulate2-examples
archive/02_displayData/src/main/scala/main.scala
Scala
mit
658
package pe.ambivalenta.rsslib

trait iTunes {
  val subtitle : String
  val summary : String
  val author : String
  val imageURL : String
  val duration : Int
}

case class Pod(title: String, url: String)
AmbivalentApe/rsslib
src/main/scala/rss/podcast.scala
Scala
mit
204
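The iTunes trait above only declares the metadata fields; no concrete episode type is part of the file. A minimal sketch of how it might be mixed into a case class; the Episode type and the sample values are assumptions, not library code:

import pe.ambivalenta.rsslib.{iTunes, Pod}

// Hypothetical concrete type mixing in the iTunes metadata trait; not part of the library.
case class Episode(
    title: String,
    subtitle: String,
    summary: String,
    author: String,
    imageURL: String,
    duration: Int) extends iTunes

object EpisodeExample {
  val ep  = Episode("Pilot", "First episode", "A short introduction", "Jane Doe", "http://example.com/art.png", 1800)
  val pod = Pod(ep.title, "http://example.com/pilot.mp3")
}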
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution import java.util.concurrent.TimeUnit._ import scala.collection.mutable.HashMap import org.apache.commons.lang3.StringUtils import org.apache.hadoop.fs.Path import org.apache.spark.rdd.RDD import org.apache.spark.sql.SparkSession import org.apache.spark.sql.catalyst.{InternalRow, TableIdentifier} import org.apache.spark.sql.catalyst.catalog.BucketSpec import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.QueryPlan import org.apache.spark.sql.catalyst.plans.physical.{HashPartitioning, Partitioning, UnknownPartitioning} import org.apache.spark.sql.catalyst.util.truncatedString import org.apache.spark.sql.execution.datasources._ import org.apache.spark.sql.execution.datasources.parquet.{ParquetFileFormat => ParquetSource} import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics} import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.sources.{BaseRelation, Filter} import org.apache.spark.sql.types.StructType import org.apache.spark.sql.vectorized.ColumnarBatch import org.apache.spark.util.Utils import org.apache.spark.util.collection.BitSet trait DataSourceScanExec extends LeafExecNode { val relation: BaseRelation val tableIdentifier: Option[TableIdentifier] protected val nodeNamePrefix: String = "" override val nodeName: String = { s"Scan $relation ${tableIdentifier.map(_.unquotedString).getOrElse("")}" } // Metadata that describes more details of this scan. protected def metadata: Map[String, String] override def simpleString(maxFields: Int): String = { val metadataEntries = metadata.toSeq.sorted.map { case (key, value) => key + ": " + StringUtils.abbreviate(redact(value), 100) } val metadataStr = truncatedString(metadataEntries, " ", ", ", "", maxFields) redact( s"$nodeNamePrefix$nodeName${truncatedString(output, "[", ",", "]", maxFields)}$metadataStr") } /** * Shorthand for calling redactString() without specifying redacting rules */ private def redact(text: String): String = { Utils.redact(sqlContext.sessionState.conf.stringRedactionPattern, text) } /** * The data being read in. This is to provide input to the tests in a way compatible with * [[InputRDDCodegen]] which all implementations used to extend. */ def inputRDDs(): Seq[RDD[InternalRow]] } /** Physical plan node for scanning data from a relation. 
*/ case class RowDataSourceScanExec( fullOutput: Seq[Attribute], requiredColumnsIndex: Seq[Int], filters: Set[Filter], handledFilters: Set[Filter], rdd: RDD[InternalRow], @transient relation: BaseRelation, override val tableIdentifier: Option[TableIdentifier]) extends DataSourceScanExec with InputRDDCodegen { def output: Seq[Attribute] = requiredColumnsIndex.map(fullOutput) override lazy val metrics = Map("numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows")) protected override def doExecute(): RDD[InternalRow] = { val numOutputRows = longMetric("numOutputRows") rdd.mapPartitionsWithIndexInternal { (index, iter) => val proj = UnsafeProjection.create(schema) proj.initialize(index) iter.map( r => { numOutputRows += 1 proj(r) }) } } // Input can be InternalRow, has to be turned into UnsafeRows. override protected val createUnsafeProjection: Boolean = true override def inputRDD: RDD[InternalRow] = rdd override val metadata: Map[String, String] = { val markedFilters = for (filter <- filters) yield { if (handledFilters.contains(filter)) s"*$filter" else s"$filter" } Map( "ReadSchema" -> output.toStructType.catalogString, "PushedFilters" -> markedFilters.mkString("[", ", ", "]")) } // Don't care about `rdd` and `tableIdentifier` when canonicalizing. override def doCanonicalize(): SparkPlan = copy( fullOutput.map(QueryPlan.normalizeExpressions(_, fullOutput)), rdd = null, tableIdentifier = None) } /** * Physical plan node for scanning data from HadoopFsRelations. * * @param relation The file-based relation to scan. * @param output Output attributes of the scan, including data attributes and partition attributes. * @param requiredSchema Required schema of the underlying relation, excluding partition columns. * @param partitionFilters Predicates to use for partition pruning. * @param optionalBucketSet Bucket ids for bucket pruning * @param dataFilters Filters on non-partition columns. * @param tableIdentifier identifier for the table in the metastore. */ case class FileSourceScanExec( @transient relation: HadoopFsRelation, output: Seq[Attribute], requiredSchema: StructType, partitionFilters: Seq[Expression], optionalBucketSet: Option[BitSet], dataFilters: Seq[Expression], override val tableIdentifier: Option[TableIdentifier]) extends DataSourceScanExec { // Note that some vals referring the file-based relation are lazy intentionally // so that this plan can be canonicalized on executor side too. See SPARK-23731. override lazy val supportsColumnar: Boolean = { relation.fileFormat.supportBatch(relation.sparkSession, schema) } private lazy val needsUnsafeRowConversion: Boolean = { if (relation.fileFormat.isInstanceOf[ParquetSource]) { SparkSession.getActiveSession.get.sessionState.conf.parquetVectorizedReaderEnabled } else { false } } override def vectorTypes: Option[Seq[String]] = relation.fileFormat.vectorTypes( requiredSchema = requiredSchema, partitionSchema = relation.partitionSchema, relation.sparkSession.sessionState.conf) val driverMetrics: HashMap[String, Long] = HashMap.empty /** * Send the driver-side metrics. Before calling this function, selectedPartitions has * been initialized. See SPARK-26327 for more details. 
*/ private def sendDriverMetrics(): Unit = { driverMetrics.foreach(e => metrics(e._1).add(e._2)) val executionId = sparkContext.getLocalProperty(SQLExecution.EXECUTION_ID_KEY) SQLMetrics.postDriverMetricUpdates(sparkContext, executionId, metrics.filter(e => driverMetrics.contains(e._1)).values.toSeq) } @transient private lazy val selectedPartitions: Array[PartitionDirectory] = { val optimizerMetadataTimeNs = relation.location.metadataOpsTimeNs.getOrElse(0L) val startTime = System.nanoTime() val ret = relation.location.listFiles(partitionFilters, dataFilters) if (relation.partitionSchemaOption.isDefined) { driverMetrics("numPartitions") = ret.length } driverMetrics("numFiles") = ret.map(_.files.size.toLong).sum val timeTakenMs = NANOSECONDS.toMillis( (System.nanoTime() - startTime) + optimizerMetadataTimeNs) driverMetrics("metadataTime") = timeTakenMs ret }.toArray /** * [[partitionFilters]] can contain subqueries whose results are available only at runtime so * accessing [[selectedPartitions]] should be guarded by this method during planning */ private def hasPartitionsAvailableAtRunTime: Boolean = { partitionFilters.exists(ExecSubqueryExpression.hasSubquery) } override lazy val (outputPartitioning, outputOrdering): (Partitioning, Seq[SortOrder]) = { val bucketSpec = if (relation.sparkSession.sessionState.conf.bucketingEnabled) { relation.bucketSpec } else { None } bucketSpec match { case Some(spec) => // For bucketed columns: // ----------------------- // `HashPartitioning` would be used only when: // 1. ALL the bucketing columns are being read from the table // // For sorted columns: // --------------------- // Sort ordering should be used when ALL these criteria's match: // 1. `HashPartitioning` is being used // 2. A prefix (or all) of the sort columns are being read from the table. // // Sort ordering would be over the prefix subset of `sort columns` being read // from the table. // eg. // Assume (col0, col2, col3) are the columns read from the table // If sort columns are (col0, col1), then sort ordering would be considered as (col0) // If sort columns are (col1, col0), then sort ordering would be empty as per rule #2 // above def toAttribute(colName: String): Option[Attribute] = output.find(_.name == colName) val bucketColumns = spec.bucketColumnNames.flatMap(n => toAttribute(n)) if (bucketColumns.size == spec.bucketColumnNames.size) { val partitioning = HashPartitioning(bucketColumns, spec.numBuckets) val sortColumns = spec.sortColumnNames.map(x => toAttribute(x)).takeWhile(x => x.isDefined).map(_.get) val shouldCalculateSortOrder = conf.getConf(SQLConf.LEGACY_BUCKETED_TABLE_SCAN_OUTPUT_ORDERING) && sortColumns.nonEmpty && !hasPartitionsAvailableAtRunTime val sortOrder = if (shouldCalculateSortOrder) { // In case of bucketing, its possible to have multiple files belonging to the // same bucket in a given relation. Each of these files are locally sorted // but those files combined together are not globally sorted. Given that, // the RDD partition will not be sorted even if the relation has sort columns set // Current solution is to check if all the buckets have a single file in it val files = selectedPartitions.flatMap(partition => partition.files) val bucketToFilesGrouping = files.map(_.getPath.getName).groupBy(file => BucketingUtils.getBucketId(file)) val singleFilePartitions = bucketToFilesGrouping.forall(p => p._2.length <= 1) if (singleFilePartitions) { // TODO Currently Spark does not support writing columns sorting in descending order // so using Ascending order. 
This can be fixed in future sortColumns.map(attribute => SortOrder(attribute, Ascending)) } else { Nil } } else { Nil } (partitioning, sortOrder) } else { (UnknownPartitioning(0), Nil) } case _ => (UnknownPartitioning(0), Nil) } } @transient private val pushedDownFilters = dataFilters.flatMap(DataSourceStrategy.translateFilter) logInfo(s"Pushed Filters: ${pushedDownFilters.mkString(",")}") override lazy val metadata: Map[String, String] = { def seqToString(seq: Seq[Any]) = seq.mkString("[", ", ", "]") val location = relation.location val locationDesc = location.getClass.getSimpleName + seqToString(location.rootPaths) val metadata = Map( "Format" -> relation.fileFormat.toString, "ReadSchema" -> requiredSchema.catalogString, "Batched" -> supportsColumnar.toString, "PartitionFilters" -> seqToString(partitionFilters), "PushedFilters" -> seqToString(pushedDownFilters), "DataFilters" -> seqToString(dataFilters), "Location" -> locationDesc) val withSelectedBucketsCount = relation.bucketSpec.map { spec => val numSelectedBuckets = optionalBucketSet.map { b => b.cardinality() } getOrElse { spec.numBuckets } metadata + ("SelectedBucketsCount" -> s"$numSelectedBuckets out of ${spec.numBuckets}") } getOrElse { metadata } withSelectedBucketsCount } lazy val inputRDD: RDD[InternalRow] = { val readFile: (PartitionedFile) => Iterator[InternalRow] = relation.fileFormat.buildReaderWithPartitionValues( sparkSession = relation.sparkSession, dataSchema = relation.dataSchema, partitionSchema = relation.partitionSchema, requiredSchema = requiredSchema, filters = pushedDownFilters, options = relation.options, hadoopConf = relation.sparkSession.sessionState.newHadoopConfWithOptions(relation.options)) val readRDD = relation.bucketSpec match { case Some(bucketing) if relation.sparkSession.sessionState.conf.bucketingEnabled => createBucketedReadRDD(bucketing, readFile, selectedPartitions, relation) case _ => createNonBucketedReadRDD(readFile, selectedPartitions, relation) } sendDriverMetrics() readRDD } override def inputRDDs(): Seq[RDD[InternalRow]] = { inputRDD :: Nil } override lazy val metrics = Map( "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"), "numFiles" -> SQLMetrics.createMetric(sparkContext, "number of files read"), "metadataTime" -> SQLMetrics.createTimingMetric(sparkContext, "metadata time") ) ++ { // Tracking scan time has overhead, we can't afford to do it for each row, and can only do // it for each batch. 
if (supportsColumnar) { Some("scanTime" -> SQLMetrics.createTimingMetric(sparkContext, "scan time")) } else { None } } ++ { if (relation.partitionSchemaOption.isDefined) { Some("numPartitions" -> SQLMetrics.createMetric(sparkContext, "number of partitions read")) } else { None } } protected override def doExecute(): RDD[InternalRow] = { val numOutputRows = longMetric("numOutputRows") if (needsUnsafeRowConversion) { inputRDD.mapPartitionsWithIndexInternal { (index, iter) => val toUnsafe = UnsafeProjection.create(schema) toUnsafe.initialize(index) iter.map { row => numOutputRows += 1 toUnsafe(row) } } } else { inputRDD.mapPartitionsInternal { iter => iter.map { row => numOutputRows += 1 row } } } } protected override def doExecuteColumnar(): RDD[ColumnarBatch] = { val numOutputRows = longMetric("numOutputRows") val scanTime = longMetric("scanTime") inputRDD.asInstanceOf[RDD[ColumnarBatch]].mapPartitionsInternal { batches => new Iterator[ColumnarBatch] { override def hasNext: Boolean = { // The `FileScanRDD` returns an iterator which scans the file during the `hasNext` call. val startNs = System.nanoTime() val res = batches.hasNext scanTime += NANOSECONDS.toMillis(System.nanoTime() - startNs) res } override def next(): ColumnarBatch = { val batch = batches.next() numOutputRows += batch.numRows() batch } } } } override val nodeNamePrefix: String = "File" /** * Create an RDD for bucketed reads. * The non-bucketed variant of this function is [[createNonBucketedReadRDD]]. * * The algorithm is pretty simple: each RDD partition being returned should include all the files * with the same bucket id from all the given Hive partitions. * * @param bucketSpec the bucketing spec. * @param readFile a function to read each (part of a) file. * @param selectedPartitions Hive-style partition that are part of the read. * @param fsRelation [[HadoopFsRelation]] associated with the read. */ private def createBucketedReadRDD( bucketSpec: BucketSpec, readFile: (PartitionedFile) => Iterator[InternalRow], selectedPartitions: Array[PartitionDirectory], fsRelation: HadoopFsRelation): RDD[InternalRow] = { logInfo(s"Planning with ${bucketSpec.numBuckets} buckets") val filesGroupedToBuckets = selectedPartitions.flatMap { p => p.files.map { f => PartitionedFileUtil.getPartitionedFile(f, f.getPath, p.values) } }.groupBy { f => BucketingUtils .getBucketId(new Path(f.filePath).getName) .getOrElse(sys.error(s"Invalid bucket file ${f.filePath}")) } val prunedFilesGroupedToBuckets = if (optionalBucketSet.isDefined) { val bucketSet = optionalBucketSet.get filesGroupedToBuckets.filter { f => bucketSet.get(f._1) } } else { filesGroupedToBuckets } val filePartitions = Seq.tabulate(bucketSpec.numBuckets) { bucketId => FilePartition(bucketId, prunedFilesGroupedToBuckets.getOrElse(bucketId, Array.empty)) } new FileScanRDD(fsRelation.sparkSession, readFile, filePartitions) } /** * Create an RDD for non-bucketed reads. * The bucketed variant of this function is [[createBucketedReadRDD]]. * * @param readFile a function to read each (part of a) file. * @param selectedPartitions Hive-style partition that are part of the read. * @param fsRelation [[HadoopFsRelation]] associated with the read. 
*/ private def createNonBucketedReadRDD( readFile: (PartitionedFile) => Iterator[InternalRow], selectedPartitions: Array[PartitionDirectory], fsRelation: HadoopFsRelation): RDD[InternalRow] = { val openCostInBytes = fsRelation.sparkSession.sessionState.conf.filesOpenCostInBytes val maxSplitBytes = FilePartition.maxSplitBytes(fsRelation.sparkSession, selectedPartitions) logInfo(s"Planning scan with bin packing, max size: $maxSplitBytes bytes, " + s"open cost is considered as scanning $openCostInBytes bytes.") val splitFiles = selectedPartitions.flatMap { partition => partition.files.flatMap { file => // getPath() is very expensive so we only want to call it once in this block: val filePath = file.getPath val isSplitable = relation.fileFormat.isSplitable( relation.sparkSession, relation.options, filePath) PartitionedFileUtil.splitFiles( sparkSession = relation.sparkSession, file = file, filePath = filePath, isSplitable = isSplitable, maxSplitBytes = maxSplitBytes, partitionValues = partition.values ) } }.sortBy(_.length)(implicitly[Ordering[Long]].reverse) val partitions = FilePartition.getFilePartitions(relation.sparkSession, splitFiles, maxSplitBytes) new FileScanRDD(fsRelation.sparkSession, readFile, partitions) } override def doCanonicalize(): FileSourceScanExec = { FileSourceScanExec( relation, output.map(QueryPlan.normalizeExpressions(_, output)), requiredSchema, QueryPlan.normalizePredicates(partitionFilters, output), optionalBucketSet, QueryPlan.normalizePredicates(dataFilters, output), None) } }
techaddict/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala
Scala
apache-2.0
19,309
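FileSourceScanExec surfaces its pushed filters, partition filters and read schema through the metadata map, which ends up in the plan string. A quick way to see that output from user code, as a minimal sketch using standard Spark API; the Parquet path is a placeholder:

import org.apache.spark.sql.SparkSession

object ScanMetadataDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("scan-metadata").getOrCreate()
    // Reading a Parquet directory and filtering on a column produces a FileSourceScanExec
    // whose PushedFilters / PartitionFilters / ReadSchema entries appear in the explain output.
    val df = spark.read.parquet("/tmp/events") // placeholder path
    df.filter("id > 10").explain()
    spark.stop()
  }
}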
package com.roundeights.isred /** * Supported bitwise operations */ object BitwiseOp extends Enumeration { type Op = Value val AND = Value("AND") val OR = Value("OR") val XOR = Value("XOR") val NOT = Value("NOT") } /** * Methods for interacting with Redis Strings */ trait Strings extends Iface { /** Append a value to a key */ def append ( key: Key, value: String ): IntResult = getInt( "APPEND" ::: key :: value :: Cmd() ) /** Count set bits in a string */ def bitCount ( key: Key, start: Int, end: Int ): IntResult = getInt( "BITCOUNT" ::: key :: start :: end :: Cmd() ) /** Count set bits in a string */ def bitCount ( key: Key ): IntResult = getInt( "BITCOUNT" ::: key :: Cmd() ) /** Perform bitwise operations between strings */ def bitOp ( operation: BitwiseOp.Op, destkey: Key, key: Key, keys: Key* ): IntResult = getInt( "BITOP" ::: operation :: destkey :: key :: keys :: Cmd() ) /** Decrement the integer value of a key by one */ def decr ( key: Key ): IntResult = getInt( "DECR" ::: key :: Cmd() ) /** Decrement the integer value of a key by the given number */ def decrBy ( key: Key, decrement: Int ): IntResult = getInt( "DECRBY" ::: key :: decrement :: Cmd() ) /** Get the value of a key */ def get[A : Convert] ( key: Key ): OptBulkResult[A] = getOptBulk[A]( "GET" ::: key :: Cmd() ) /** Returns the bit value at offset in the string value stored at key */ def getBit ( key: Key, offset: Int ): BoolResult = getBool( "GETBIT" ::: key :: offset :: Cmd() ) /** Get a substring of the string stored at a key */ def getRange[A : Convert] ( key: Key, start: Int, end: Int ): BulkResult[A] = getBulk[A]( "GETRANGE" ::: key :: start :: end :: Cmd() ) /** Set the string value of a key and return its old value */ def getSet[A : Convert] ( key: Key, value: String ): OptBulkResult[A] = getOptBulk[A]( "GETSET" ::: key :: value :: Cmd() ) /** Increment the integer value of a key by one */ def incr ( key: Key ): IntResult = getInt( "INCR" ::: key :: Cmd() ) /** Increment the integer value of a key by the given amount */ def incrBy ( key: Key, increment: Int ): IntResult = getInt( "INCRBY" ::: key :: increment :: Cmd() ) /** Increment the float value of a key by the given amount */ def incrByFloat ( key: Key, increment: Double ): FloatResult = getFloat( "INCRBYFLOAT" ::: key :: increment :: Cmd() ) /** Get the values of all the given keys */ def mGet[A : Convert] ( key: Key, keys: Key* ): BulkSeqResult[A] = getBulkSeq[A]( "MGET" ::: key :: keys :: Cmd() ) /** Set multiple keys to multiple values */ def mSet ( pair: (Key, String), pairs: (Key, String)* ): AckResult = getAck( "MSET" ::: pair :: pairs :: Cmd() ) /** Set multiple keys to multiple values, only if none of the keys exist */ def mSetNX ( pair: (Key, String), pairs: (Key, String)* ): AckResult = getAck( "MSETNX" ::: pair :: pairs :: Cmd() ) /** Set the value and expiration in milliseconds of a key */ def pSetEx ( key: Key, milliseconds: Int, value: String ): AckResult = getAck( "PSETEX" ::: key :: milliseconds :: value :: Cmd() ) /** Set the string value of a key */ def set ( key: Key, value: String ): AckResult = getAck( "SET" ::: key :: value :: Cmd() ) /** Sets or clears the bit at offset in the string value stored at key */ def setBit ( key: Key, offset: Int, value: Boolean ): BoolResult = { val bit = if ( value ) 1 else 0 getBool( "SETBIT" ::: key :: offset :: bit :: Cmd() ) } /** Set the value and expiration of a key */ def setEx ( key: Key, seconds: Int, value: String ): AckResult = getAck( "SETEX" ::: key :: seconds :: value :: Cmd() ) /** Set the 
value of a key, only if the key does not exist */ def setNX ( key: Key, value: String ): AckResult = getAck( "SETNX" ::: key :: value :: Cmd() ) /** Overwrite part of a string at key starting at the specified offset */ def setRange ( key: Key, offset: Int, value: String ): IntResult = getInt( "SETRANGE" ::: key :: offset :: value :: Cmd() ) /** Get the length of the value stored in a key */ def strLen ( key: Key ): IntResult = getInt( "STRLEN" ::: key :: Cmd() ) }
Nycto/IsRed
src/main/scala/IsRed/iface/Strings.scala
Scala
mit
4,436
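Each method above wraps one Redis string command and returns a typed result. A rough usage sketch, assuming a connected client that mixes in the Strings trait and some way of building Key values provided elsewhere in the library; neither is shown in this file:

object StringsUsageSketch {
  import com.roundeights.isred.{Key, Strings}

  // Rough sketch only: `redis` stands for a concrete client that mixes in the
  // Strings trait, and `asKey` for however the library builds Key values; both
  // are assumptions, not APIs shown in this file.
  def demo(redis: Strings, asKey: String => Key): Unit = {
    redis.set(asKey("greeting"), "hello")
    redis.append(asKey("greeting"), " world")
    redis.incrBy(asKey("counter"), 5)
    redis.strLen(asKey("greeting"))
    ()
  }
}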
/*********************************************************************** * Copyright (c) 2013-2015 Commonwealth Computer Research, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, Version 2.0 which * accompanies this distribution and is available at * http://www.opensource.org/licenses/apache2.0.php. *************************************************************************/ package org.locationtech.geomesa.tools import com.typesafe.scalalogging.slf4j.Logging import org.locationtech.geomesa.accumulo.data.AccumuloDataStore import org.locationtech.geomesa.accumulo.index._ import org.locationtech.geomesa.tools.commands.CreateFeatureParams import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes object FeatureCreator extends Logging { def createFeature(params: CreateFeatureParams): Unit = { val ds = new DataStoreHelper(params).getOrCreateDs createFeature(ds, params) } def createFeature(ds: AccumuloDataStore, params: CreateFeatureParams): Unit = createFeature( ds, params.spec, params.featureName, Option(params.dtgField), Option(params.useSharedTables), params.catalog) def createFeature(ds: AccumuloDataStore, sftspec: String, featureName: String, dtField: Option[String], sharedTable: Option[Boolean], catalog: String): Unit = { logger.info(s"Creating '$featureName' on catalog table '$catalog' with spec " + s"'$sftspec'. Just a few moments...") if (ds.getSchema(featureName) == null) { logger.info("Creating GeoMesa tables...") val sft = SimpleFeatureTypes.createType(featureName, sftspec) if (dtField.orNull != null) { // Todo: fix logic here, it is a bit strange sft.getUserData.put(SF_PROPERTY_START_TIME, dtField.getOrElse(Constants.SF_PROPERTY_START_TIME)) } sharedTable.foreach { org.locationtech.geomesa.accumulo.index.setTableSharing(sft, _) } ds.createSchema(sft) if (ds.getSchema(featureName) != null) { logger.info(s"Feature '$featureName' on catalog table '$catalog' with spec " + s"'$sftspec' successfully created.") println(s"Created feature $featureName") } else { logger.error(s"There was an error creating feature '$featureName' on catalog table " + s"'$catalog' with spec '$sftspec'. Please check that all arguments are correct " + "in the previous command.") } } else { logger.error(s"A feature named '$featureName' already exists in the data store with " + s"catalog table '$catalog'.") } } }
mcharles/geomesa
geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/FeatureCreator.scala
Scala
apache-2.0
2,753
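createFeature builds the SimpleFeatureType from a spec string before creating the schema. A small illustration of what such a call looks like, independent of Accumulo; the feature name and attribute names are made up for the sketch:

import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes

object SpecSketch {
  def main(args: Array[String]): Unit = {
    // Hypothetical feature name and spec string, purely for illustration.
    val spec = "name:String,dtg:Date,*geom:Point:srid=4326"
    val sft  = SimpleFeatureTypes.createType("observations", spec)
    println(sft.getAttributeDescriptors)
  }
}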
package org.cmt

object Main {

  def main(args: Array[String]): Unit = {
    println("Hello world!")
    val a = 5
    val b = 7
    println(s"$a + $b = ${Math.add(a, b)}")
  }
}
lightbend-training/course-management-tools
course-templates/scala-cmt-template-no-common/step_000_initial_state/src/main/scala/org/cmt/Main.scala
Scala
apache-2.0
182
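Main calls Math.add, which is not defined in this file; presumably the course template supplies it elsewhere in the org.cmt package. A minimal sketch of what that object could look like; this is an assumption, not the template's actual code:

package org.cmt

// Hypothetical definition backing the call in Main; the real template may differ.
object Math {
  def add(a: Int, b: Int): Int = a + b
}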
package software.egger.jirapaymosync

import com.atlassian.jira.rest.client.api.domain.Issue
import software.egger.jirapaymosync.paymo.Task

import scala.language.implicitConversions

object TaskIssueImplicits {

  implicit def issuesToTaskListMatcher(jiraIssues: Iterable[Issue]): TaskListMatcher = new TaskListMatcher(jiraIssues)

  implicit def issueToTask(issue: Issue): Task = new Task(s"${issue.getKey} ${issue.getSummary}")
}
eggeral/jira-paymo-sync
src/main/scala/software/egger/jirapaymosync/TaskIssueImplicits.scala
Scala
apache-2.0
436
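With these conversions in scope, a JIRA Issue can be used wherever a Paymo Task is expected. A usage sketch, assuming an Issue obtained from the JIRA REST client rather than constructed here:

object TaskIssueUsageSketch {
  import software.egger.jirapaymosync.TaskIssueImplicits._
  import software.egger.jirapaymosync.paymo.Task
  import com.atlassian.jira.rest.client.api.domain.Issue

  // `issue` would come from the JIRA REST client; it is left abstract here.
  def toPaymoTask(issue: Issue): Task = issue // the implicit issueToTask conversion applies
}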
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.mandar2812.dynaml.algebra import breeze.generic.UFunc import breeze.linalg.operators._ import breeze.linalg.{DenseMatrix, DenseVector, scaleAdd} import io.github.mandar2812.dynaml.utils /** * @author mandar2812 date: 17/10/2016. * Reference implementations for linear algebra operations * on partitioned vectors and matrices. */ object PartitionedMatrixOps extends UFunc { /* * Addition Operations * * */ /** * Reference implementation for adding * two [[PartitionedPSDMatrix]] objects. * */ implicit object addPartitionedPSDMatAandB extends OpAdd.Impl2[PartitionedPSDMatrix, PartitionedPSDMatrix, PartitionedPSDMatrix] { def apply(a: PartitionedPSDMatrix, b: PartitionedPSDMatrix) = { require( a.rows == b.rows && a.cols == b.cols, "For matrix addition A + B, their dimensions must match") require( a.rowBlocks == b.rowBlocks && a.colBlocks == b.colBlocks, "For blocked matrix addition A + B, they must have equal number of blocks") val mat1 = a._data val mat2 = b._data new PartitionedPSDMatrix( mat1.zip(mat2).map(c => (c._1._1, c._1._2 + c._2._2)), a.rows, a.cols, a.rowBlocks, a.colBlocks) } } implicit object addPartitionedPSDMatAandMatB extends OpAdd.Impl2[PartitionedPSDMatrix, PartitionedMatrix, PartitionedMatrix] { def apply(a: PartitionedPSDMatrix, b: PartitionedMatrix) = addPartitionedMatAandB(a.asInstanceOf[PartitionedMatrix], b) } /** * Reference implementation for adding * two [[PartitionedMatrix]] objects. 
* */ implicit object addPartitionedMatAandB extends OpAdd.Impl2[PartitionedMatrix, PartitionedMatrix, PartitionedMatrix] { def apply(a: PartitionedMatrix, b: PartitionedMatrix) = { require( a.rows == b.rows && a.cols == b.cols, "For matrix addition A + B, their dimensions must match") require( a.rowBlocks == b.rowBlocks && a.colBlocks == b.colBlocks, "For blocked matrix addition A + B, they must have equal number of blocks") val mat1 = a._data val mat2 = b._data new PartitionedMatrix( mat1.zip(mat2).map(c => (c._1._1, c._1._2 + c._2._2)), a.rows, a.cols, a.rowBlocks, a.colBlocks) } } implicit object addPartitionedLMatAandUB extends OpSub.Impl2[LowerTriPartitionedMatrix, UpperTriPartitionedMatrix, PartitionedMatrix] { def apply(a: LowerTriPartitionedMatrix, b: UpperTriPartitionedMatrix) = { require( a.rows == b.rows && a.cols == b.cols, "For matrix addition A + B, their dimensions must match") require( a.rowBlocks == b.rowBlocks && a.colBlocks == b.colBlocks, "For blocked matrix addition A + B, they must have equal number of blocks") val mat1 = a._data val mat2 = b._data new PartitionedMatrix( mat1.zip(mat2).map(c => (c._1._1, c._1._2 + c._2._2)), a.rows, a.cols, a.rowBlocks, a.colBlocks) } } implicit object addPartitionedMatAandUB extends OpAdd.Impl2[PartitionedMatrix, UpperTriPartitionedMatrix, PartitionedMatrix] { def apply(a: PartitionedMatrix, b: UpperTriPartitionedMatrix) = { require( a.rows == b.rows && a.cols == b.cols, "For matrix addition A + B, their dimensions must match") require( a.rowBlocks == b.rowBlocks && a.colBlocks == b.colBlocks, "For blocked matrix addition A + B, they must have equal number of blocks") val mat1 = a._data val mat2 = b._data new PartitionedMatrix( mat1.zip(mat2).map(c => (c._1._1, c._1._2 + c._2._2)), a.rows, a.cols, a.rowBlocks, a.colBlocks) } } /** * Reference implementation for adding * two [[PartitionedVector]] objects. 
* */ implicit object addPartitionedVecAandB extends OpAdd.Impl2[PartitionedVector, PartitionedVector, PartitionedVector] { def apply(a: PartitionedVector, b: PartitionedVector) = { require( a.rows == b.rows, "For vector addition A + B, their dimensions must match") require( a.rowBlocks == b.rowBlocks, "For blocked vector addition A + B, they must have equal number of blocks") val mat1 = a._data val mat2 = b._data new PartitionedVector(mat1.zip(mat2).map(c => (c._1._1, c._1._2 + c._2._2)), a.rows, a.rowBlocks) } } /* * Subtraction * */ implicit object subPartitionedMatAandB extends OpSub.Impl2[PartitionedMatrix, PartitionedMatrix, PartitionedMatrix] { def apply(a: PartitionedMatrix, b: PartitionedMatrix) = { require( a.rows == b.rows && a.cols == b.cols, "For matrix addition A + B, their dimensions must match") require( a.rowBlocks == b.rowBlocks && a.colBlocks == b.colBlocks, "For blocked matrix addition A + B, they must have equal number of blocks") val mat1 = a._data val mat2 = b._data new PartitionedMatrix( mat1.zip(mat2).map(c => (c._1._1, c._1._2 - c._2._2)), a.rows, a.cols, a.rowBlocks, a.colBlocks) } } implicit object subPartitionedPSDMatAandMatB extends OpSub.Impl2[PartitionedPSDMatrix, PartitionedMatrix, PartitionedMatrix] { def apply(a: PartitionedPSDMatrix, b: PartitionedMatrix) = subPartitionedMatAandB(a.asInstanceOf[PartitionedMatrix], b) } implicit object subPartitionedLMatAandUB extends OpSub.Impl2[LowerTriPartitionedMatrix, UpperTriPartitionedMatrix, PartitionedMatrix] { def apply(a: LowerTriPartitionedMatrix, b: UpperTriPartitionedMatrix) = { require( a.rows == b.rows && a.cols == b.cols, "For matrix addition A + B, their dimensions must match") require( a.rowBlocks == b.rowBlocks && a.colBlocks == b.colBlocks, "For blocked matrix addition A + B, they must have equal number of blocks") val mat1 = a._data val mat2 = b._data new PartitionedMatrix( mat1.zip(mat2).map(c => (c._1._1, c._1._2 - c._2._2)), a.rows, a.cols, a.rowBlocks, a.colBlocks) } } implicit object subPartitionedVecAandB extends OpSub.Impl2[PartitionedVector, PartitionedVector, PartitionedVector] { def apply(a: PartitionedVector, b: PartitionedVector) = { require( a.rows == b.rows, "For vector addition A + B, their dimensions must match") require( a.rowBlocks == b.rowBlocks, "For blocked vector addition A + B, they must have equal number of blocks") val mat1 = a._data val mat2 = b._data new PartitionedVector(mat1.zip(mat2).map(c => (c._1._1, c._1._2 - c._2._2)), a.rows, a.rowBlocks) } } /* * Multiplication * */ implicit object multPartitionedVecAScalar extends OpMulMatrix.Impl2[PartitionedVector, Double, PartitionedVector] { def apply(a: PartitionedVector, b: Double) = a.map(c => (c._1, c._2*b)) } implicit object multSPartitionedVecAScalar extends OpMulScalar.Impl2[PartitionedVector, Double, PartitionedVector] { def apply(a: PartitionedVector, b: Double) = a.map(c => (c._1, c._2*b)) } implicit object multMPartitionedMatAScalar extends OpMulMatrix.Impl2[PartitionedMatrix, Double, PartitionedMatrix] { def apply(a: PartitionedMatrix, b: Double) = a.map(c => (c._1, c._2*b)) } implicit object elemWisemultPartitionedVecAVecB extends OpMulScalar.Impl2[PartitionedVector, PartitionedVector, PartitionedVector] { def apply(a: PartitionedVector, b: PartitionedVector) = { require(a.rows == b.rows, "For element wise multiplication, partitioned vectors must be of same dimension") require(a.rowBlocks == b.rowBlocks, "For element wise multiplication, partitioned vectors must have same number of blocks") 
PartitionedVector(a._data.zip(b._data).map(c => (c._1._1, c._1._2 *:* c._2._2)), a.rows) } } implicit object multSPartitionedMatAScalar extends OpMulScalar.Impl2[PartitionedMatrix, Double, PartitionedMatrix] { def apply(a: PartitionedMatrix, b: Double) = a.map(c => (c._1, c._2*b)) } implicit object multSPartitionedPSDMatAScalar extends OpMulMatrix.Impl2[PartitionedPSDMatrix, Double, PartitionedPSDMatrix] { def apply(a: PartitionedPSDMatrix, b: Double) = { require(b > 0.0, "PSD matrix can only be multiplied by positive number") new PartitionedPSDMatrix(a._underlyingdata.map(c => (c._1, c._2*b)), a.rows, a.cols, a.rowBlocks, a.colBlocks) } } implicit object multPartitionedMatAandB extends OpMulMatrix.Impl2[PartitionedMatrix, PartitionedMatrix, PartitionedMatrix] { def apply(a: PartitionedMatrix, b: PartitionedMatrix) = { require( a.cols == b.rows, "In matrix multiplication A.B, Num_Columns(A) = Num_Rows(B)") require( a.colBlocks == b.rowBlocks, "In matrix multiplication A.B, Num_Column_Blocks(A) = Num_Row_Blocks(B)") new PartitionedMatrix( utils.combine(Seq(a._data, b._data)) .filter(c => c.head._1._2 == c.last._1._1) .map(c => ((c.head._1._1, c.last._1._2), c.head._2*c.last._2)) .groupBy(_._1) .toStream .map(c => (c._1, c._2.map(_._2).reduce((a,b) => a+b)) ), a.rows, b.cols, a.rowBlocks, b.colBlocks) } } implicit object multPartitionedMatAVecB extends OpMulMatrix.Impl2[PartitionedMatrix, PartitionedVector, PartitionedVector] { def apply(a: PartitionedMatrix, b: PartitionedVector) = { require( a.cols == b.rows, "In matrix-vector multiplication A.b, Num_Columns(A) = Num_Rows(B)") require( a.colBlocks == b.rowBlocks, "In matrix-vector multiplication A.b, Num_Column_Blocks(A) = Num_Row_Blocks(B)") new PartitionedVector( utils.combine(Seq(a._data, b._data.map(c => ((c._1, 0L), c._2.toDenseMatrix.t)))) .filter(c => c.head._1._2 == c.last._1._1) .map(c => (c.head._1._1, c.head._2*c.last._2)) .groupBy(_._1) .toStream .map(c => (c._1, c._2.map(_._2).reduce((a,b) => a+b).toDenseVector) ), a.rows, a.rowBlocks) } } implicit object innerPartitionedVecAandB extends OpMulInner.Impl2[PartitionedVector, PartitionedVector, Double] { def apply(a: PartitionedVector, b: PartitionedVector) = { require( a.rows == b.rows, "In vector dot product A.B, their dimensions must match") a._data.zip(b._data).map(c => c._1._2 dot c._2._2).sum } } /** * Reference implementation taking outer product * between a [[PartitionedVector]] and [[PartitionedDualVector]] yielding * a [[PartitionedMatrix]]. 
* */ implicit object outMultPartitionedVecAandB extends OpMulMatrix.Impl2[PartitionedVector, PartitionedDualVector, PartitionedMatrix] { def apply(a: PartitionedVector, b: PartitionedDualVector) = { require( a.cols == b.rows, "In matrix multiplication A.B, Num_Columns(A) = Num_Rows(B)") new PartitionedMatrix( utils.combine(Seq(a._data, b.t._data)).toStream .map(c => ( (c.head._1, c.last._1), c.head._2 * c.last._2.t)), a.rows, b.cols, a.rowBlocks, b.colBlocks) } } /* * Division * */ implicit object elemWiseDivPartitionedVecAVecB extends OpDiv.Impl2[PartitionedVector, PartitionedVector, PartitionedVector] { def apply(a: PartitionedVector, b: PartitionedVector) = { require(a.rows == b.rows, "For element wise division, partitioned vectors must be of same dimension") require(a.rowBlocks == b.rowBlocks, "For element wise division, partitioned vectors must have same number of blocks") PartitionedVector(a._data.zip(b._data).map(c => (c._1._1, c._1._2 /:/ c._2._2)), a.rows) } } implicit object elemWiseDivPartitionedMatAMatB extends OpDiv.Impl2[PartitionedMatrix, PartitionedMatrix, PartitionedMatrix] { def apply(a: PartitionedMatrix, b: PartitionedMatrix) = { require(a.rows == b.rows && a.cols == b.cols, "For element wise division, partitioned matrices must be of same dimension") require(a.rowBlocks == b.rowBlocks && a.colBlocks == b.colBlocks, "For element wise division, partitioned matrices must have same number of blocks") PartitionedMatrix(a._data.zip(b._data).map(c => (c._1._1, c._1._2 /:/ c._2._2)), a.rows, a.cols) } } implicit object elemWiseModPartitionedVecAVecB extends OpMod.Impl2[PartitionedVector, PartitionedVector, PartitionedVector] { def apply(a: PartitionedVector, b: PartitionedVector) = { require(a.rows == b.rows, "For element wise modulo, partitioned vectors must be of same dimension") require(a.rowBlocks == b.rowBlocks, "For element wise modulo, partitioned vectors must have same number of blocks") PartitionedVector(a._data.zip(b._data).map(c => (c._1._1, c._1._2 %:% c._2._2)), a.rows) } } /* * In place update operations * */ implicit object inPlaceAddPartitionedVec extends OpAdd.InPlaceImpl2[PartitionedVector, PartitionedVector] { override def apply(v: PartitionedVector, v2: PartitionedVector): Unit = v._data.zip(v2._data).foreach(c => c._1._2 :+= c._2._2) } implicit object axpyPartitionedVec extends scaleAdd.InPlaceImpl3[PartitionedVector, Double, PartitionedVector] { override def apply(v: PartitionedVector, v2: Double, v3: PartitionedVector): Unit = { v :+= (v3*v2) } } implicit object inPlaceMultPartitionedVecAScalar extends OpMulScalar.InPlaceImpl2[PartitionedVector, Double] { override def apply(v: PartitionedVector, v2: Double): Unit = { v._data.foreach(x => x._2 :*= v2) } } }
transcendent-ai-labs/DynaML
dynaml-core/src/main/scala/io/github/mandar2812/dynaml/algebra/PartitionedMatrixOps.scala
Scala
apache-2.0
14,577
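Every operation above follows the same breeze pattern: an implicit object extending the operator's Impl2 type for the concrete operand types, with an apply method doing the work. A stripped-down sketch of that pattern for a toy type; Pair2 and its instance are illustrative, not part of DynaML:

import breeze.generic.UFunc
import breeze.linalg.operators.OpAdd

object ToyOps extends UFunc {

  // Toy two-element "vector" used only to illustrate the pattern.
  case class Pair2(x: Double, y: Double)

  // Same shape as the implicits above: an OpAdd.Impl2 instance for our type.
  implicit object addPair2 extends OpAdd.Impl2[Pair2, Pair2, Pair2] {
    def apply(a: Pair2, b: Pair2): Pair2 = Pair2(a.x + b.x, a.y + b.y)
  }

  def main(args: Array[String]): Unit = {
    // Without operator syntax wired up, the instance can be resolved and used directly.
    val sum = implicitly[OpAdd.Impl2[Pair2, Pair2, Pair2]].apply(Pair2(1, 2), Pair2(3, 4))
    println(sum) // Pair2(4.0,6.0)
  }
}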
package me.heaton.implicits

object FunctionImp {

  sealed trait Countable[T] {
    def count(t: T): Int
  }

  def getLarge[T](a: T, b: T)(implicit countable: Countable[T]) =
    if (countable.count(a) > countable.count(b)) a else b

  implicit lazy val stringCounter = new Countable[String] {
    def count(t: String): Int = t.split(" ").length
  }

  implicit lazy val intCounter = new Countable[Int] {
    def count(t: Int): Int = t
  }
}
heaton/hello-scala
src/main/scala/me/heaton/implicits/FunctionImp.scala
Scala
mit
448
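getLarge resolves the Countable instance from implicit scope, so the same call works for strings (counted by words) and ints (counted by value). A quick usage sketch:

object FunctionImpDemo {
  import me.heaton.implicits.FunctionImp._

  def main(args: Array[String]): Unit = {
    println(getLarge("hello big world", "hi"))  // "hello big world" (3 words vs 1)
    println(getLarge(3, 42))                    // 42
  }
}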
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.datasources.v2 import scala.util.control.NonFatal import org.apache.spark.{SparkEnv, SparkException, TaskContext} import org.apache.spark.executor.CommitDeniedException import org.apache.spark.internal.Logging import org.apache.spark.rdd.RDD import org.apache.spark.sql.Row import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.encoders.{ExpressionEncoder, RowEncoder} import org.apache.spark.sql.catalyst.expressions.Attribute import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.execution.SparkPlan import org.apache.spark.sql.execution.streaming.{MicroBatchExecution, StreamExecution} import org.apache.spark.sql.execution.streaming.continuous.{CommitPartitionEpoch, ContinuousExecution, EpochCoordinatorRef, SetWriterPartitions} import org.apache.spark.sql.sources.v2.writer._ import org.apache.spark.sql.sources.v2.writer.streaming.StreamWriter import org.apache.spark.sql.types.StructType import org.apache.spark.util.Utils /** * The logical plan for writing data into data source v2. */ case class WriteToDataSourceV2(writer: DataSourceWriter, query: LogicalPlan) extends LogicalPlan { override def children: Seq[LogicalPlan] = Seq(query) override def output: Seq[Attribute] = Nil } /** * The physical plan for writing data into data source v2. */ case class WriteToDataSourceV2Exec(writer: DataSourceWriter, query: SparkPlan) extends SparkPlan { override def children: Seq[SparkPlan] = Seq(query) override def output: Seq[Attribute] = Nil override protected def doExecute(): RDD[InternalRow] = { val writeTask = writer match { case w: SupportsWriteInternalRow => w.createInternalRowWriterFactory() case _ => new InternalRowDataWriterFactory(writer.createWriterFactory(), query.schema) } val useCommitCoordinator = writer.useCommitCoordinator val rdd = query.execute() val messages = new Array[WriterCommitMessage](rdd.partitions.length) logInfo(s"Start processing data source writer: $writer. 
" + s"The input RDD has ${messages.length} partitions.") try { sparkContext.runJob( rdd, (context: TaskContext, iter: Iterator[InternalRow]) => DataWritingSparkTask.run(writeTask, context, iter, useCommitCoordinator), rdd.partitions.indices, (index, message: WriterCommitMessage) => { messages(index) = message writer.onDataWriterCommit(message) } ) logInfo(s"Data source writer $writer is committing.") writer.commit(messages) logInfo(s"Data source writer $writer committed.") } catch { case cause: Throwable => logError(s"Data source writer $writer is aborting.") try { writer.abort(messages) } catch { case t: Throwable => logError(s"Data source writer $writer failed to abort.") cause.addSuppressed(t) throw new SparkException("Writing job failed.", cause) } logError(s"Data source writer $writer aborted.") cause match { // Only wrap non fatal exceptions. case NonFatal(e) => throw new SparkException("Writing job aborted.", e) case _ => throw cause } } sparkContext.emptyRDD } } object DataWritingSparkTask extends Logging { def run( writeTask: DataWriterFactory[InternalRow], context: TaskContext, iter: Iterator[InternalRow], useCommitCoordinator: Boolean): WriterCommitMessage = { val stageId = context.stageId() val partId = context.partitionId() val attemptId = context.attemptNumber() val epochId = Option(context.getLocalProperty(MicroBatchExecution.BATCH_ID_KEY)).getOrElse("0") val dataWriter = writeTask.createDataWriter(partId, attemptId, epochId.toLong) // write the data and commit this writer. Utils.tryWithSafeFinallyAndFailureCallbacks(block = { iter.foreach(dataWriter.write) val msg = if (useCommitCoordinator) { val coordinator = SparkEnv.get.outputCommitCoordinator val commitAuthorized = coordinator.canCommit(context.stageId(), partId, attemptId) if (commitAuthorized) { logInfo(s"Writer for stage $stageId, task $partId.$attemptId is authorized to commit.") dataWriter.commit() } else { val message = s"Stage $stageId, task $partId.$attemptId: driver did not authorize commit" logInfo(message) // throwing CommitDeniedException will trigger the catch block for abort throw new CommitDeniedException(message, stageId, partId, attemptId) } } else { logInfo(s"Writer for partition ${context.partitionId()} is committing.") dataWriter.commit() } logInfo(s"Writer for stage $stageId, task $partId.$attemptId committed.") msg })(catchBlock = { // If there is an error, abort this writer logError(s"Writer for stage $stageId, task $partId.$attemptId is aborting.") dataWriter.abort() logError(s"Writer for stage $stageId, task $partId.$attemptId aborted.") }) } } class InternalRowDataWriterFactory( rowWriterFactory: DataWriterFactory[Row], schema: StructType) extends DataWriterFactory[InternalRow] { override def createDataWriter( partitionId: Int, attemptNumber: Int, epochId: Long): DataWriter[InternalRow] = { new InternalRowDataWriter( rowWriterFactory.createDataWriter(partitionId, attemptNumber, epochId), RowEncoder.apply(schema).resolveAndBind()) } } class InternalRowDataWriter(rowWriter: DataWriter[Row], encoder: ExpressionEncoder[Row]) extends DataWriter[InternalRow] { override def write(record: InternalRow): Unit = rowWriter.write(encoder.fromRow(record)) override def commit(): WriterCommitMessage = rowWriter.commit() override def abort(): Unit = rowWriter.abort() }
ddna1021/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/WriteToDataSourceV2.scala
Scala
apache-2.0
6,760
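The physical plan above is what a DataFrame write lands on when the target source goes through the v2 write path. A minimal caller-side sketch; the format name and output path are placeholders, and whether this plan is actually used depends on the source implementing the DataSourceV2 writer interfaces:

import org.apache.spark.sql.{SaveMode, SparkSession}

object V2WriteDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("v2-write").getOrCreate()
    import spark.implicits._

    val df = Seq((1, "a"), (2, "b")).toDF("id", "value")
    // "com.example.MyV2Source" is a placeholder for a data source that implements
    // the DataSourceV2 write support; built-in file formats may take a different plan.
    df.write
      .format("com.example.MyV2Source")
      .mode(SaveMode.Append)
      .save("/tmp/v2-demo")
    spark.stop()
  }
}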
package io.mth.lever

import java.io.InputStream
// The structural type in the signature below relies on reflective calls;
// importing the feature flag silences the compiler warning.
import scala.language.reflectiveCalls

object LeverPipe {
  def pipe(in: InputStream, writable: {def write(b: Array[Byte], offset: Int, length: Int): Int}, buffersize: Int) = {
    val buffer = new Array[Byte](buffersize)
    Iterator.continually(in.read(buffer, 0, buffer.length)).
      takeWhile(_ != -1).
      foreach(writable.write(buffer, 0, _))
  }
}
markhibberd/lever
src/prod/io/mth/lever/LeverPipe.scala
Scala
bsd-3-clause
390
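pipe only requires the target to expose a write(Array[Byte], Int, Int): Int method, checked structurally. A small usage sketch with an in-memory source and an ad-hoc writable; the wrapper object is illustrative:

import java.io.{ByteArrayInputStream, ByteArrayOutputStream}

object LeverPipeDemo {
  def main(args: Array[String]): Unit = {
    val in  = new ByteArrayInputStream("hello lever".getBytes("UTF-8"))
    val out = new ByteArrayOutputStream()

    // OutputStream.write returns Unit, so wrap it in an object that matches the
    // structural type pipe expects (returning the number of bytes written).
    val writable = new {
      def write(b: Array[Byte], offset: Int, length: Int): Int = {
        out.write(b, offset, length)
        length
      }
    }

    io.mth.lever.LeverPipe.pipe(in, writable, buffersize = 16)
    println(out.toString("UTF-8")) // hello lever
  }
}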
package com.spr.scala.impl import com.spr.scala.Bag /** * Demonstrates using `ArrayOps` implicit conversions with collection functions. Essentially, this shows off how to * both implement our `Bag` trait using an array as well as how we can automatically convert arrays into a collection * type that provides a much richer API than arrays normally do. */ // note two bits of syntax here: // 1. fields that double as constructor params can be placed in parenthesis after the class name // 2. implementing a trait uses "extends" just as it would when extending a class. there is no implements keyword class ImplicitArrayBag[+A](bag: Array[A]) extends Bag[A] { override def add[A1 >: A](item: A1): Bag[A1] = // the :+ and +: methods are used to add an item to a collection. // ordering works like this: // collection :+ item // item +: collection // the COLon goes on the COLlection side (as noted in the scaladocs) new ImplicitArrayBag(bag :+ item) override def remove(p: A => Boolean): Bag[A] = new ImplicitArrayBag(bag.filterNot(p)) override def map[B](f: A => B): Bag[B] = Bag(bag.map(f): _*) override def flatMap[B](f: A => Bag[B]): Bag[B] = // note that we can compose functions using the andThen method // also note that we can make rather simple lambda functions using the unnamed parameter syntax as shown below Bag(bag.flatMap(f.andThen(_.toArray)): _*) override def filter(p: A => Boolean): Bag[A] = Bag(bag.filter(p): _*) override def reduce[A1 >: A](acc: (A1, A1) => A1): Option[A1] = bag.reduceOption(acc) override def toArray[A1 >: A]: Array[A1] = Array(bag: _*) override def toStream: Stream[A] = Stream(bag: _*) override def contains(p: A => Boolean): Boolean = bag.exists(p) override def size: Int = bag.length override def isEmpty: Boolean = bag.isEmpty override def iterator: Iterator[A] = bag.iterator }
jvz/scala-for-java
src/main/scala/com/spr/scala/impl/ImplicitArrayBag.scala
Scala
apache-2.0
1,943
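The class above wraps an array and leans on ArrayOps for the collection methods; operations that build a new Bag go through the Bag companion's apply, which is defined alongside the trait and not shown in this file. A usage sketch assuming that companion exists:

object ImplicitArrayBagDemo {
  import com.spr.scala.impl.ImplicitArrayBag

  def main(args: Array[String]): Unit = {
    val bag = new ImplicitArrayBag(Array(1, 2, 3))

    println(bag.size)                    // 3
    println(bag.contains(_ > 2))         // true
    println(bag.reduce(_ + _))           // Some(6)
    // map/filter return Bag via the companion's apply, which lives with the trait
    // definition and is assumed here rather than shown in this file.
    println(bag.filter(_ % 2 == 1).size) // 2
  }
}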
package au.com.dius.pact.consumer import org.specs2.mutable.Specification import au.com.dius.pact.consumer.Fixtures._ import au.com.dius.pact.model._ import scala.concurrent.duration.FiniteDuration import org.json4s.JsonAST.{JField, JString, JObject} import org.json4s.jackson.JsonMethods._ import scala.concurrent.ExecutionContext import java.util.concurrent.Executors import au.com.dius.pact.model.Interaction import au.com.dius.pact.model.dispatch.HttpClient import scala.util.Success import org.junit.runner.RunWith import org.specs2.runner.JUnitRunner import org.specs2.execute.Result @RunWith(classOf[JUnitRunner]) class MockProviderSpec extends Specification { implicit val executionContext = ExecutionContext.fromExecutor(Executors.newCachedThreadPool()) implicit val timeout = FiniteDuration(10L, "second") def verify:ConsumerTestVerification[Result] = { r:Result => if(r.isSuccess) { None } else { Some(r) } } //TODO: move PactServer startup and shutdown into an around function "Pact Mock Service Provider" should { "Respond to invalid and valid requests" in { val server = DefaultMockProvider.withDefaultConfig() val validRequest = request.copy(path = s"${server.config.url}/") val invalidRequest = request.copy(path = s"${server.config.url}/foo") val Success((codeResult, results)) = server.runAndClose[Result](pact) { val invalidResponse = HttpClient.run(invalidRequest) invalidResponse.map(_.status) must beEqualTo(500).await(timeout = timeout) //hit server with valid request val validResponse = HttpClient.run(validRequest) validResponse.map(_.status) must beEqualTo(response.status).await(timeout = timeout) } verify(codeResult) must beNone results.matched.size must === (1) results.unexpected.size must === (1) def compareRequests(actual: Request, expected: Request) = { actual.method must beEqualTo(expected.method) def trimHost(s: String) = s.replaceAll(server.config.url, "") trimHost(actual.path) must beEqualTo(trimHost(expected.path)) val expectedHeaders = expected.headers.getOrElse(Map()) actual.headers.map(_.filter(t => expectedHeaders.contains(t._1))) must beEqualTo(expected.headers) parse(actual.body.get) must beEqualTo(parse(expected.body.get)) } def compare(actual: Interaction, request:Request, response:Response) = { actual.description must beEqualTo(interaction.description) actual.providerState must beEqualTo(interaction.providerState) compareRequests(actual.request, request) def chunk(s:String) = s.replaceAll("\\n", "").replaceAll(" ", "").replaceAll("\\t", "").toLowerCase.take(10) actual.response.body.map(chunk) must beEqualTo(response.body.map(chunk)) actual.response.copy(body = None) must beEqualTo(response.copy(body = None)) } val expectedInvalidResponse = Response(500, Map("Access-Control-Allow-Origin" -> "*"), pretty(JObject(JField("error", JString("unexpected request")))), null) compareRequests(results.unexpected.head, invalidRequest) compare(results.matched.head, validRequest, response) } } }
caoquendo/pact-jvm
pact-jvm-consumer/src/test/scala/au/com/dius/pact/consumer/MockProviderSpec.scala
Scala
apache-2.0
3,271
package ucesoft.cbm.cpu import ucesoft.cbm.ChipID.ID import ucesoft.cbm.trace.{BreakType, CpuStepInfo, NoBreak, TraceListener} import ucesoft.cbm.{Chip, ChipID, Log} import scala.language.implicitConversions import java.io.PrintWriter import java.io.ObjectOutputStream import java.io.ObjectInputStream import java.util.Properties object Z80 { @inline private def hex2(data: Int) = "%02X".format(data & 0xffff) @inline private def hex4(data: Int) = "%04X".format(data & 0xffff) @inline private def WORD(h:Int,l:Int) = ((h << 8) | l) & 0xFFFF trait IOMemory { def in(addressHI:Int,addressLO:Int) : Int def out(addressHI:Int,addressLO:Int,value:Int) : Unit def internalOperation(cycles:Int,address:Int = 0) : Unit = {} } object EmptyIOMemory extends IOMemory { def in(addressHI:Int,addressLO:Int) = 0 def out(addressHI:Int,addressLO:Int,value:Int) : Unit = {} } class Context(val mem:Memory,val io:IOMemory) { var A1,F1,H1,L1,D1,E1,B1,C1 = 0 var halted = false var im = 0 var A = 0 var B = 0 var C = 0 var D = 0 var E = 0 var F = 0 var H = 0 var L = 0 var I = 0 var R = 0 var IX = 0 var IY = 0 var IFF1 = 0 var IFF2 = 0 var PC = 0 var SP = 0 var memptr = 0xFFFF var Q,lastQ = false private[this] var delayInt = false private[this] var additionalClockCycles = 0 var isIndexX = true var lastWrite = 0 final def copyQ : Unit = { lastQ = Q Q = false } // state def saveState(out:ObjectOutputStream) : Unit = { out.writeInt(im) out.writeInt(A1) out.writeInt(B1) out.writeInt(C1) out.writeInt(D1) out.writeInt(E1) out.writeInt(F1) out.writeInt(H1) out.writeInt(L1) out.writeInt(A) out.writeInt(B) out.writeInt(C) out.writeInt(D) out.writeInt(E) out.writeInt(F) out.writeInt(H) out.writeInt(L) out.writeInt(I) out.writeInt(R) out.writeInt(IX) out.writeInt(IY) out.writeInt(IFF1) out.writeInt(IFF2) out.writeInt(PC) out.writeInt(SP) out.writeBoolean(halted) out.writeBoolean(delayInt) out.writeInt(additionalClockCycles) out.writeInt(memptr) out.writeBoolean(Q) } def loadState(in:ObjectInputStream) : Unit = { im = in.readInt A1 = in.readInt B1 = in.readInt C1 = in.readInt D1 = in.readInt E1 = in.readInt F1 = in.readInt H1 = in.readInt L1 = in.readInt A = in.readInt B = in.readInt C = in.readInt D = in.readInt E = in.readInt F = in.readInt H = in.readInt L = in.readInt I = in.readInt R = in.readInt IX = in.readInt IY = in.readInt IFF1 = in.readInt IFF2 = in.readInt PC = in.readInt SP = in.readInt halted = in.readBoolean delayInt = in.readBoolean additionalClockCycles = in.readInt memptr = in.readInt Q = in.readBoolean } final def setAdditionalClockCycles(acs:Int) : Unit = additionalClockCycles += acs final def getAdditionalClockSycles : Int = { val acs = additionalClockCycles additionalClockCycles = 0 acs } final def mustDelayInt : Boolean = delayInt final def setDelayInt(value:Boolean) : Unit = delayInt = value /* | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | |---|---|---|---|---|---|---|---| | S | Z | Y | H | X |P/V| N | C | |---|---|---|---|---|---|---|---| */ final val CFLAG = 0x1 final val NFLAG = 0x2 final val PFLAG = 0x4 final val XFLAG = 0x8 final val HFLAG = 0x10 final val YFLAG = 0x20 final val ZFLAG = 0x40 final val SFLAG = 0x80 // ========================================= FLAGS ========================================================= // pre calculated Sign, Zero and Parity flags for 0-255 values final val SZP : Array[Int] = { val szip = Array.ofDim[Int](0x100) for(i <- 0 to 0xFF) { if ((i & 0x80) > 0) szip(i) |= SFLAG if (i == 0) szip(i) |= ZFLAG var p = 0 var j = i for(_ <- 0 until 8) { p ^= j & 0x1 j >>= 1 } szip(i) |= (if (p != 0) 
0 else PFLAG) } szip } final def zero: Int = F & ZFLAG final def sign: Int = F & SFLAG final def carry: Int = F & CFLAG final def negative: Int = F & NFLAG final def parity: Int = F & PFLAG final def half: Int = F & HFLAG final def xf : Int = F & XFLAG final def yf : Int = F & YFLAG final def setZero(set:Boolean): Unit = if (set) F |= ZFLAG else F &= ~ZFLAG final def setSign(set:Boolean): Unit = if (set) F |= SFLAG else F &= ~SFLAG final def setCarry(set:Boolean): Unit = if (set) F |= CFLAG else F &= ~CFLAG final def setNegative(set:Boolean): Unit = if (set) F |= NFLAG else F &= ~NFLAG final def setParity(set:Boolean): Unit = if (set) F |= PFLAG else F &= ~PFLAG final def setHalf(set:Boolean): Unit = if (set) F |= HFLAG else F &= ~HFLAG final def setXY(result:Int) : Unit = F = (F & 0xD7) | result & 0x28 // ========================================================================================================= final def incPC(value:Int = 1) : Unit = PC = (PC + value) & 0xFFFF final def AF1: Int = WORD(A1,F1) final def HL1: Int = WORD(H1,L1) final def DE1: Int = WORD(D1,E1) final def BC1: Int = WORD(B1,C1) final def IR: Int = WORD(I,R) final def AF: Int = WORD(A,F) final def HL: Int = WORD(H,L) final def BC: Int = WORD(B,C) final def DE: Int = WORD(D,E) final def AF_=(w:Int): Unit = { A = (w >> 8) & 0xFF F = w & 0xFF } final def HL_=(w:Int): Unit = { H = (w >> 8) & 0xFF L = w & 0xFF } final def BC_=(w:Int) : Unit = { B = (w >> 8) & 0xFF C = w & 0xFF } final def DE_=(w:Int) : Unit = { D = (w >> 8) & 0xFF E = w & 0xFF } final def AF1_=(w:Int): Unit = { A1 = (w >> 8) & 0xFF F1 = w & 0xFF } final def HL1_=(w:Int): Unit = { H1 = (w >> 8) & 0xFF L1 = w & 0xFF } final def BC1_=(w:Int) : Unit = { B1 = (w >> 8) & 0xFF C1 = w & 0xFF } final def DE1_=(w:Int) : Unit = { D1 = (w >> 8) & 0xFF E1 = w & 0xFF } final def incDecBC(inc:Boolean) : Unit = { if (inc) { C += 1 if (C == 0x100) { C = 0 B = (B + 1) & 0xFF } } else { C -= 1 if (C == -1) { C = 0xFF B = (B - 1) & 0xFF } } } final def incDecDE(inc:Boolean) : Unit = { if (inc) { E += 1 if (E == 0x100) { E = 0 D = (D + 1) & 0xFF } } else { E -= 1 if (E == -1) { E = 0xFF D = (D - 1) & 0xFF } } } final def incDecHL(inc:Boolean) : Unit = { if (inc) { L += 1 if (L == 0x100) { L = 0 H = (H + 1) & 0xFF } } else { L -= 1 if (L == -1) { L = 0xFF H = (H - 1) & 0xFF } } } final def incDecSP(inc:Boolean) : Unit = if (inc) SP = (SP + 1) & 0xFFFF else SP = (SP - 1) & 0xFFFF @inline private def incDecIX(inc:Boolean) : Unit = if (inc) IX = (IX + 1) & 0xFFFF else IX = (IX - 1) & 0xFFFF @inline private def incDecIY(inc:Boolean) : Unit = if (inc) IY = (IY + 1) & 0xFFFF else IY = (IY - 1) & 0xFFFF final def incDecIndex(inc:Boolean) : Unit = if (isIndexX) incDecIX(inc) else incDecIY(inc) @inline private def IX_+(d:Int): Int = (IX + d.asInstanceOf[Byte]) & 0xFFFF @inline private def IY_+(d:Int): Int = (IY + d.asInstanceOf[Byte]) & 0xFFFF final def INDEX_+(d:Int,iop:Boolean = true): Int = { if (iop) io.internalOperation(5,PC) memptr = if (isIndexX) IX_+(d) else IY_+(d) memptr } final def INDEX : Int = if (isIndexX) IX else IY final def INDEX_=(value:Int) : Unit = if (isIndexX) IX = value else IY = value final def EX_SP_IX : Unit = { val tmp = readW(SP) io.internalOperation(1,(SP + 1) & 0xFFFF) writeW(SP,INDEX) io.internalOperation(2,SP) INDEX = tmp memptr = tmp } final def EX_SP_HL : Unit = { val tmp = readW(SP) io.internalOperation(1,(SP + 1) & 0xFFFF) writeW(SP,HL) io.internalOperation(2,SP) HL = tmp memptr = tmp } final def EX_AF : Unit = { var tmp = A A = A1 A1 = tmp tmp = 
F F = F1 F1 = tmp } final def EX_DE_HL : Unit = { var tmp = D D = H H = tmp tmp = E E = L L = tmp } final def EXX : Unit = { // BC <=> BC' var tmp = B B = B1 B1 = tmp tmp = C C = C1 C1 = tmp // DE <=> DE' tmp = D D = D1 D1 = tmp tmp = E E = E1 E1 = tmp // HL <=> HL' tmp = H H = H1 H1 = tmp tmp = L L = L1 L1 = tmp } final def IXL: Int = IX & 0xFF final def IXH : Int = (IX >> 8) & 0xFF final def IYL: Int = IY & 0xFF final def IYH : Int = (IY >> 8) & 0xFF final def INDEX_L : Int = if (isIndexX) IXL else IYL final def INDEX_H : Int = if (isIndexX) IXH else IYH final def INDEX_L_=(value:Int) : Unit = if (isIndexX) IXL = value else IYL = value final def INDEX_H_=(value:Int) : Unit = if (isIndexX) IXH = value else IYH = value final def IXH_=(value:Int) : Unit = IX = ((value & 0xFF) << 8) | IX & 0xFF final def IXL_=(value:Int) : Unit = IX = IX & 0xFF00 | value & 0xFF final def IYH_=(value:Int) : Unit = IY = ((value & 0xFF) << 8) | IY & 0xFF final def IYL_=(value:Int) : Unit = IY = IY & 0xFF00 | value & 0xFF final def push(w:Int) : Unit = { SP = (SP - 1) & 0xFFFF mem.write(SP,(w >> 8) & 0xFF,ChipID.CPU) SP = (SP - 1) & 0xFFFF mem.write(SP,w & 0xFF,ChipID.CPU) } final def pop: Int = { var popped = mem.read(SP) SP = (SP + 1) & 0xFFFF popped |= mem.read(SP) << 8 SP = (SP + 1) & 0xFFFF popped } final def byte(offset:Int): Int = mem.read((PC + offset) & 0xFFFF) final def word(offset:Int): Int = mem.read((PC + offset + 1) & 0xFFFF) << 8 | mem.read((PC + offset) & 0xFFFF) final def read(address:Int,iop:Int = 0): Int = { val r = mem.read(address) if (iop > 0) io.internalOperation(iop,address) r } final def readW(address:Int): Int = mem.read(address) | mem.read((address + 1) & 0xFFFF) << 8 final def write(address:Int,value:Int): Unit = { mem.write(address,value,ChipID.CPU) lastWrite = value } final def writeW(address:Int,value:Int): Unit = { mem.write(address,value & 0xFF,ChipID.CPU) mem.write((address + 1) & 0xFFFF,(value >> 8) & 0xFF,ChipID.CPU) } final def reset : Unit = { AF = 0xFFFF SP = 0xFFFF PC = 0 IFF1 = 0 IFF2 = 0 im = 0 halted = false IX = 0 IY = 0 BC = 0 DE = 0 HL = 0 AF1 = 0 BC1 = 0 DE1 = 0 HL1 = 0 I = 0 R = 0 isIndexX = true memptr = 0xFFFF } override def toString = s"PC=${hex4(PC)} AF=${hex4(AF)} BC=${hex4(BC)} DE=${hex4(DE)} HL=${hex4(HL)} IX=${hex4(IX)} IY=${hex4(IY)} I=${hex2(I)} im=$im SP=${hex2(SP)} SZYHXPNC=${sr2String}" @inline private def sr2String = { val sb = new StringBuilder if (sign > 0) sb += 'S' else sb += '-' if (zero > 0) sb += 'Z' else sb += '-' if (yf > 0) sb += 'Y' else sb += '-' if (half > 0) sb += 'H' else sb += '-' if (xf > 0) sb += 'X' else sb += '-' if (parity > 0) sb += 'P' else sb += '-' if (carry > 0) sb += 'C' else sb += '-' sb.toString } // ============================================================= final def add(value: Int): Unit = { val tmp = (A + value) & 0xFF F = SZP(tmp) ; Q = true setCarry(((A + value) & 0x100) > 0) setHalf(((A ^ value ^ tmp) & 0x10) > 0) setParity(((~(A ^ value)) & (A ^ tmp) & 0x80) > 0) A = tmp setXY(tmp) } final def adc(value: Int): Unit = { val oldCarry = carry val tmp = (A + value + oldCarry) & 0xFF F = SZP(tmp) ; Q = true setCarry(((A + value + oldCarry) & 0x100) > 0) setHalf(((A ^ value ^ tmp) & 0x10) > 0) setParity(((~(A ^ value)) & (A ^ tmp) & 0x80) > 0) A = tmp setXY(tmp) } final def sub(value: Int): Unit = { val tmp = (A - value) & 0xFF F = SZP(tmp) ; Q = true setNegative(true) setCarry(value > A) setHalf(((A ^ value ^ tmp) & 0x10) > 0) setParity(((A ^ value) & (A ^ tmp) & 0x80) > 0) A = tmp setXY(tmp) } final def sbc(value: 
Int): Unit = { val oldCarry = carry val tmp = (A - value - oldCarry) & 0xFF F = SZP(tmp) ; Q = true setNegative(true) setCarry(value + oldCarry > A) setHalf(((A ^ value ^ tmp) & 0x10) > 0) setParity(((A ^ value) & (A ^ tmp) & 0x80) > 0) A = tmp setXY(tmp) } final def and(value: Int): Unit = { A &= value F = SZP(A) ; Q = true setHalf(true) setXY(A) } final def xor(value: Int): Unit = { A ^= value F = SZP(A) ; Q = true setXY(A) } final def or(value: Int): Unit = { A |= value F = SZP(A) ; Q = true setXY(A) } final def cpl : Unit = { A = ~A & 0xFF setHalf(true) setNegative(true) setXY(A) Q = true } final def ccf : Unit = { val q = if (lastQ) F else 0 F = F & 0xC4 | (if ((F & 1) != 0) 0x10 else 1) | ((q ^ F) | A) & 0x28 Q = true } final def scf : Unit = { val q = if (lastQ) F else 0 F = F & 0xC4 | ((q ^ F) | A) & 0x28 | 1 Q = true } final def daa : Unit = { var c,d = 0 if (A > 0x99 || carry > 0) { c = 1 d = 0x60 } if ((A & 0xF) > 9 || half != 0) d += 6 val oldA = A A += (if (negative > 0) -d else d) A &= 0xFF F = SZP(A) | (A ^ oldA) & HFLAG | F & 0x2 | c ; Q = true setXY(A) } final def cp(value: Int): Unit = { val tmp = (A - value) & 0xFF F = SZP(tmp) ; Q = true setNegative(true) setCarry(value > A) setHalf(((A ^ value ^ tmp) & 0x10) > 0) setParity(((A ^ value) & (A ^ tmp) & 0x80) > 0) setXY(value) } final def addHLBC : Unit = { memptr = (HL + 1) & 0xFFFF io.internalOperation(7,IR) val tmp = HL + BC setNegative(false) setCarry((tmp & 0x10000) > 0) setHalf((((tmp >> 8) ^ H ^ B) & 0x10) > 0) HL = tmp setXY(tmp >> 8) Q = true } final def addHLDE : Unit = { memptr = (HL + 1) & 0xFFFF io.internalOperation(7,IR) val tmp = HL + DE setNegative(false) setCarry((tmp & 0x10000) > 0) setHalf((((tmp >> 8) ^ H ^ D) & 0x10) > 0) HL = tmp setXY(tmp >> 8) Q = true } final def addHLHL : Unit = { memptr = (HL + 1) & 0xFFFF io.internalOperation(7,IR) val tmp = HL + HL setNegative(false) setCarry((tmp & 0x10000) > 0) setHalf((((tmp >> 8) ^ H ^ H) & 0x10) > 0) HL = tmp setXY(tmp >> 8) Q = true } final def addHLSP : Unit = { memptr = (HL + 1) & 0xFFFF io.internalOperation(7,IR) val tmp = HL + SP setNegative(false) setCarry((tmp & 0x10000) > 0) setHalf((((tmp >> 8) ^ H ^ ((SP >> 8) & 0xFF)) & 0x10) > 0) HL = tmp setXY(tmp >> 8) Q = true } final def addIXBC : Unit = { memptr = (INDEX + 1) & 0xFFFF io.internalOperation(7,IR) val tmp = INDEX + BC setNegative(false) setCarry((tmp & 0x10000) > 0) setHalf((((tmp >> 8) ^ ((INDEX >> 8) & 0xFF) ^ B) & 0x10) > 0) INDEX = tmp & 0xFFFF setXY(tmp >> 8) Q = true } final def addIXDE : Unit = { memptr = (INDEX + 1) & 0xFFFF io.internalOperation(7,IR) val tmp = INDEX + DE setNegative(false) setCarry((tmp & 0x10000) > 0) setHalf((((tmp >> 8) ^ ((INDEX >> 8) & 0xFF) ^ D) & 0x10) > 0) INDEX = tmp & 0xFFFF setXY(tmp >> 8) Q = true } final def addIXSP : Unit = { memptr = (INDEX + 1) & 0xFFFF io.internalOperation(7,IR) val tmp = INDEX + SP setNegative(false) setCarry((tmp & 0x10000) > 0) setHalf((((tmp >> 8) ^ ((INDEX >> 8) & 0xFF) ^ ((SP >> 8) & 0xFF)) & 0x10) > 0) INDEX = tmp & 0xFFFF setXY(tmp >> 8) Q = true } final def addIXIX : Unit = { memptr = (INDEX + 1) & 0xFFFF io.internalOperation(7,IR) val tmp = INDEX + INDEX setNegative(false) setCarry((tmp & 0x10000) > 0) setHalf((((tmp >> 8) ^ ((INDEX >> 8) & 0xFF) ^ ((INDEX >> 8) & 0xFF)) & 0x10) > 0) INDEX = tmp & 0xFFFF setXY(tmp >> 8) Q = true } final def adcHL(value: Int): Unit = { memptr = (HL + 1) & 0xFFFF io.internalOperation(7,IR) val valueH = (value >> 8) & 0xFF val tmp = HL + value + carry setZero((tmp & 0xFFFF) == 0) setSign((tmp 
& 0x8000) > 0) setNegative(false) setCarry((tmp & 0x10000) > 0) setHalf((((tmp >> 8) ^ H ^ valueH) & 0x10) > 0) setParity(((~(H ^ valueH)) & (valueH ^ (tmp >> 8)) & 0x80) > 0) HL = tmp setXY(tmp >> 8) Q = true } final def sbcHL(value: Int): Unit = { memptr = (HL + 1) & 0xFFFF io.internalOperation(7,IR) val valueH = (value >> 8) & 0xFF val tmp = HL - value - carry setZero((tmp & 0xFFFF) == 0) setSign((tmp & 0x8000) > 0) setNegative(true) setCarry((tmp & 0x10000) > 0) setHalf((((tmp >> 8) ^ H ^ valueH) & 0x10) > 0) setParity(((H ^ (tmp >> 8)) & (H ^ valueH) & 0x80) > 0) HL = tmp setXY(tmp >> 8) Q = true } final def rotLC(value: Int) = { val h = (value & 0x80) >> 7 val rot = (value << 1 | h) & 0xFF F = SZP(rot) ; Q = true setCarry(h > 0) setXY(rot) rot } final def rotRC(value: Int) = { val oldCarry = value & 0x01 val rot = (value >> 1 | oldCarry << 7) & 0xFF F = SZP(rot) ; Q = true setCarry(oldCarry > 0) setXY(rot) rot } final def rotL(value: Int) = { val oldCarry = carry val h = (value & 0x80) >> 7 val rot = (value << 1 | oldCarry) & 0xFF F = SZP(rot) ; Q = true setCarry(h > 0) setXY(rot) rot } final def rotR(value: Int) = { val oldCarry = carry val l = (value & 0x01) val rot = (value >> 1 | oldCarry << 7) & 0xFF F = SZP(rot) ; Q = true setCarry(l > 0) setXY(rot) rot } final def sla(value: Int, bit0: Int = 0) = { val h = (value & 0x80) val shift = bit0 | (value << 1) & 0xFF F = SZP(shift) ; Q = true setCarry(h > 0) setXY(shift) shift } final def sra(value: Int) = { val l = (value & 0x01) val h = (value & 0x80) val shift = (value >> 1 | h) & 0xFF F = SZP(shift) ; Q = true setCarry(l > 0) setXY(shift) shift } final def srl(value: Int) = { val l = value & 0x01 val shift = (value >> 1) & 0xFF F = SZP(shift) ; Q = true setCarry(l > 0) setXY(shift) shift } final def rlca(): Unit = { val value = A val h = (value & 0x80) >> 7 val rot = (value << 1 | h) & 0xFF setHalf(false) setNegative(false) setCarry(h > 0) A = rot setXY(A) Q = true } final def rrca(): Unit = { val value = A val oldCarry = value & 0x01 val rot = (value >> 1 | oldCarry << 7) & 0xFF setHalf(false) setNegative(false) setCarry(oldCarry > 0) A = rot setXY(rot) Q = true } final def rla(): Unit = { val value = A val oldCarry = carry val h = value & 0x80 val rot = (value << 1 | oldCarry) & 0xFF setHalf(false) setNegative(false) setCarry(h > 0) A = rot setXY(rot) Q = true } final def rra(): Unit = { val value = A val oldCarry = carry val l = value & 0x01 val rot = (value >> 1 | oldCarry << 7) & 0xFF setHalf(false) setNegative(false) setCarry(l > 0) A = rot setXY(rot) Q = true } final def rld : Unit = { memptr = (HL + 1) & 0xFFFF val memHL = read(HL) io.internalOperation(4,HL) write(HL,(memHL & 0x0F) << 4 | A & 0x0F) A = A & 0xF0 | (memHL & 0xF0) >> 4 F = SZP(A) | carry ; Q = true setHalf(false) setNegative(false) setXY(A) } final def rrd : Unit = { memptr = (HL + 1) & 0xFFFF val memHL = read(HL) io.internalOperation(4,HL) write(HL,(A & 0x0F) << 4 | (memHL & 0xF0) >> 4) A = A & 0xF0 | memHL & 0x0F F = SZP(A) | carry ; Q = true setHalf(false) setNegative(false) setXY(A) } final def bit(b: Int, value: Int, opType: Int = 0): Unit = { val isZero = (value & (1 << b)) == 0 Q = true setZero(isZero) setParity(isZero) setHalf(true) setNegative(false) setSign(b == 7 && (value & 0x80) > 0) opType match { case 0 => // BIT b,r setXY(value) case 1 => // BIT b,(IX + d) //setXY(addr) setXY(memptr >> 8) case 2 => // BIT b,(HL) setXY(memptr >> 8) } } final def res(b:Int,value:Int) = value & ~(1 << b) final def set(b:Int,value:Int) = value | (1 << b) final def 
jre_e : Unit = { io.internalOperation(5,PC) val addr = (PC + 2 + byte(1).asInstanceOf[Byte]) & 0xFFFF PC = addr memptr = addr } final def jp_cond_nn(cond: Boolean): Unit = { val ofs = word(1) if (cond) PC = ofs else PC = (PC + 3) & 0xFFFF memptr = ofs } final def jr_cond_e(cond: Boolean): Unit = { val ofs = byte(1).asInstanceOf[Byte] if (cond) { io.internalOperation(5,PC) PC = (PC + 2 + ofs) & 0xFFFF memptr = PC } else { PC = (PC + 2) & 0xFFFF setAdditionalClockCycles(-5) } } final def call(addr: Int): Unit = { push((PC + 3) & 0xFFFF) PC = addr memptr = addr } final def call_cond_nn(cond: Boolean): Unit = { val tmp = word(1) if (cond) { io.internalOperation(1,(PC + 1) & 0xFFFF) call(tmp) setAdditionalClockCycles(7) } else { PC = (PC + 3) & 0xFFFF memptr = tmp } } final def ret_cond(cond: Boolean): Unit = { io.internalOperation(1,IR) if (cond) { PC = pop setAdditionalClockCycles(6) } else PC = (PC + 1) & 0xFFFF } final def retni(): Unit = { PC = pop IFF1 = IFF2 memptr = PC } final def rst(pcl: Int): Unit = { io.internalOperation(1,IR) push((PC + 1) & 0xFFFF) PC = pcl memptr = PC } final def in_a_n : Unit = { val port = byte(1) memptr = ((A << 8) + port + 1) & 0xFFFF A = io.in(A,port) } final def in_r_c() = { val v = io.in(B,C) F = SZP(v) | carry ; Q = true setHalf(false) setNegative(false) setXY(v) memptr = (BC + 1) & 0xFFFF v } final def ini(inc: Boolean): Unit = { io.internalOperation(1,IR) val tmp = io.in(B,C) write(HL,tmp) incDecHL(inc) memptr = if (inc) (BC + 1) & 0xFFFF else (BC - 1) & 0xFFFF B = (B - 1) & 0xFF F = SZP(B) ; Q = true setNegative((tmp & 0x80) > 0) val tmp2 = if (inc) (tmp + C + 1) & 0xFF else (tmp + C - 1) & 0xFF val parity = (SZP((tmp2 & 0x07) ^ B) & 0x04) > 0 setParity(parity) setHalf(tmp2 < tmp) setCarry(tmp2 < tmp) setXY(B) } final def out_c_r(value:Int) : Unit = { io.out(B,C,value) memptr = (BC + 1) & 0xFFFF } /* * tmp := (hl), ((c)) := tmp, hl += 1, b -= 1 => flags, nf := tmp.7, tmp2 = tmp + l, pf := parity of [[tmp2 AND 0x07] XOR b], hf := cf := tmp2 > 255 */ final def outi(inc: Boolean): Unit = { io.internalOperation(1,IR) val tmp = read(HL) incDecHL(inc) B = (B - 1) & 0xFF io.out(B,C,tmp) memptr = if (inc) (BC + 1) & 0xFFFF else (BC - 1) & 0xFFFF F = SZP(B) ; Q = true setNegative((tmp & 0x80) > 0) val tmp2 = tmp + L val parity = (SZP((tmp2 & 0x07) ^ B) & 0x04) > 0 setParity(parity) setHalf(tmp2 > 0xFF) setCarry(tmp2 > 0xFF) setXY(B) } final def neg : Unit = { val tmp = (0 - A) & 0xFF F = SZP(tmp) ; Q = true setNegative(true) setHalf(((A ^ tmp) & 0x10) > 0) setParity((A & tmp & 0x80) > 0) setCarry(A > 0) A = tmp setXY(tmp) } final def incR(deltaR:Int) = R = (R & 0x80) | (R + deltaR) & 0x7F final def ldi : Unit = { val tmp = read(HL) val tmp2 = A + tmp val de = DE write(DE,tmp) ; incDecDE(true) ; incDecHL(true) ; incDecBC(false) io.internalOperation(2,de) F = F & 0xC1 ; Q = true setParity(BC != 0) setNegative(false) F = F & 0xD7 | tmp2 & 0x8 | (tmp2 & 0x2) << 4 } final def ldir : Unit = { val tmp = read(HL) val tmp2 = A + tmp val de = DE write(DE,tmp) ; incDecDE(true) ; incDecHL(true) ; incDecBC(false) io.internalOperation(2,de) F &= 0xC1 ; Q = true setParity(BC != 0) setHalf(false) if (BC == 0) { PC = (PC + 2) & 0xFFFF setAdditionalClockCycles(-5) } else { io.internalOperation(5,de) memptr = (PC + 1) & 0xFFFF } F = F & 0xD7 | tmp2 & 0x8 | (tmp2 & 0x2) << 4 } final def ldd : Unit = { val tmp = read(HL) val tmp2 = A + tmp val de = DE write(DE,tmp) ; incDecDE(false) ; incDecHL(false) ; incDecBC(false) io.internalOperation(2,de) F &= 0xC1 ; Q = true setParity(BC != 
0) setNegative(false) F = F & 0xD7 | tmp2 & 0x8 | (tmp2 & 0x2) << 4 } final def lddr : Unit = { val tmp = read(HL) val tmp2 = A + tmp val de = DE write(DE,tmp) ; incDecDE(false) ; incDecHL(false) ; incDecBC(false) io.internalOperation(2,de) F &= 0xC1 ; Q = true setParity(BC != 0) setHalf(false) if (BC == 0) { PC = (PC + 2) & 0xFFFF setAdditionalClockCycles(-5) } else { io.internalOperation(5,de) memptr = (PC + 1) & 0xFFFF } F = F & 0xD7 | tmp2 & 0x8 | (tmp2 & 0x2) << 4 } final def cpi : Unit = { val value = read(HL) io.internalOperation(5,HL) var cmp = (A - value) & 0xFF incDecHL(true) ; incDecBC(false) F = carry | SZP(cmp) ; Q = true setParity(BC != 0) setNegative(true) setHalf(((A ^ value ^ cmp) & 0x10) > 0) if (half > 0) cmp -= 1 F = F & 0xD7 | cmp & 0x8 | (cmp & 0x2) << 4 memptr = (memptr + 1) & 0xFFFF } final def cpir : Unit = { val value = read(HL) io.internalOperation(5,HL) var cmp = (A - value) & 0xFF val hl = HL incDecHL(true) ; incDecBC(false) F = carry | SZP(cmp) ; Q = true setParity(BC != 0) setNegative(true) setHalf(((A ^ value ^ cmp) & 0x10) > 0) if (BC == 0 || cmp == 0) { PC = (PC + 2) & 0xFFFF setAdditionalClockCycles(-5) memptr = (memptr + 1) & 0xFFFF } else { io.internalOperation(5,hl) memptr = (PC + 1) & 0xFFFF } if (half > 0) cmp -= 1 F = F & 0xD7 | cmp & 0x8 | (cmp & 0x2) << 4 } final def cpdr : Unit = { val value = read(HL) io.internalOperation(5,HL) var cmp = (A - value) & 0xFF val hl = HL incDecHL(false) ; incDecBC(false) F = carry | SZP(cmp) ; Q = true setParity(BC != 0) setNegative(true) setHalf(((A ^ value ^ cmp) & 0x10) > 0) if (BC == 0 || cmp == 0) { PC = (PC + 2) & 0xFFFF setAdditionalClockCycles(-5) memptr = (memptr - 1) & 0xFFFF } else { io.internalOperation(5,hl) memptr = (PC + 1) & 0xFFFF } if (half > 0) cmp -= 1 F = F & 0xD7 | cmp & 0x8 | (cmp & 0x2) << 4 } final def cpd : Unit = { val value = read(HL) io.internalOperation(5,HL) var cmp = (A - value) & 0xFF incDecHL(false) ; incDecBC(false) F = carry | SZP(cmp) ; Q = true setParity(BC != 0) setNegative(true) setHalf(((A ^ value ^ cmp) & 0x10) > 0) if (half > 0) cmp -= 1 F = F & 0xD7 | cmp & 0x8 | (cmp & 0x2) << 4 memptr = (memptr - 1) & 0xFFFF } final def djnz : Unit = { io.internalOperation(1,IR) val ofs = byte(1).asInstanceOf[Byte] B = (B - 1) & 0xFF if (B == 0) PC = (PC + 2) & 0xFFFF else { setAdditionalClockCycles(5) io.internalOperation(5,PC) PC = (PC + 2 + ofs) & 0xFFFF memptr = PC } } } private implicit def int2Array(x:Int): Array[Int] = Array(x) private implicit def tuple2Array(x:Tuple2[Int,Int]): Array[Int] = Array(x._1,x._2) private implicit def tuple3Array(x:Tuple3[Int,Int,Int]): Array[Int] = Array(x._1,x._2,x._3) private implicit def string2Mnem(s:String): (Memory, Int) => String = (Memory, Int) => s private class FD(exe : Context => Unit) extends (Context => Unit) { def apply(ctx : Context) : Unit = { try { ctx.isIndexX = false exe(ctx) } finally { ctx.isIndexX = true } } } private case class Opcode(opcodes:Array[Int], cycles:Int, size:Int, getMnemonic : (Memory,Int) => String, modifyPC:Boolean = false, copyopcodes:Array[Int] = null)(val executeFunction:Context => Unit) { def disassemble(mem:Memory,pc:Int) : String = { val sb = new StringBuilder(hex4(pc) + " ") var s = pc while (s < pc + size) { sb.append(hex2(mem.read(s & 0xFFFF)) + " ") s += 1 } var spaces = 20 - sb.length while (spaces > 0) { sb += ' ' spaces -= 1 } sb.toString + getMnemonic(mem,pc) } } private val opcodes_1, opcodes_ed, opcodes_cb, opcodes_dd, opcodes_fd, opcodes_ddcb, opcodes_fdcb = Array.ofDim[Opcode](256) // 
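// --------------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: how 256-entry opcode tables
// like opcodes_1 / opcodes_ed above are typically filled and consumed. `DemoOpcode`,
// `demoTable` and `demoFetchExecute` are hypothetical names used only for this
// example; the real fetch/decode loop lives elsewhere in this file.
// --------------------------------------------------------------------------------
private object OpcodeTableDemo {
  // Stripped-down stand-in for the Opcode case class: one handler per opcode byte,
  // with the handler passed in a second (curried) parameter list, as above.
  final case class DemoOpcode(code: Int, mnemonic: String)(val execute: () => Unit)

  // 256-entry table indexed by the first opcode byte, mirroring Array.ofDim[Opcode](256).
  private val demoTable = Array.ofDim[DemoOpcode](256)
  demoTable(0x00) = DemoOpcode(0x00, "NOP")(() => ())

  // Fetch/decode/execute for a single unprefixed opcode: read the byte at PC, look it
  // up in the table and run its handler. A prefixed opcode (CB/DD/ED/FD) would simply
  // select a different 256-entry table before indexing. (Hypothetical demo only.)
  def demoFetchExecute(memory: Array[Int], pc: Int): String = {
    val op = demoTable(memory(pc & 0xFFFF) & 0xFF)
    op.execute()
    op.mnemonic
  }
}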
================================== LOAD 8 bit =========================================================== private val LD_A_I = Opcode((0xED,0x57),9,2,"LD A,I") { ctx => ctx.io.internalOperation(1,ctx.IR) ctx.A = ctx.I ctx.F = ctx.SZP(ctx.A) | ctx.carry ; ctx.Q = true ctx.setParity(ctx.IFF2 > 0) ctx.setXY(ctx.A) } private val LD_A_R = Opcode((0xED,0x5F),9,2,"LD A,R") { ctx => ctx.io.internalOperation(1,ctx.IR) ctx.A = ctx.R ctx.F = ctx.SZP(ctx.A) | ctx.carry ; ctx.Q = true ctx.setParity(ctx.IFF2 > 0) ctx.setXY(ctx.R) } // *** LD r,r' // ************** private val LD_A_A = Opcode(0x7F,4,1,"LD A,A") { ctx => /*ctx.A = ctx.A*/ } private val LD_A_B = Opcode(0x78,4,1,"LD A,B") { ctx => ctx.A = ctx.B } private val LD_A_C = Opcode(0x79,4,1,"LD A,C") { ctx => ctx.A = ctx.C } private val LD_A_D = Opcode(0x7A,4,1,"LD A,D") { ctx => ctx.A = ctx.D } private val LD_A_E = Opcode(0x7B,4,1,"LD A,E") { ctx => ctx.A = ctx.E } private val LD_A_H = Opcode(0x7C,4,1,"LD A,H") { ctx => ctx.A = ctx.H } private val LD_A_L = Opcode(0x7D,4,1,"LD A,L") { ctx => ctx.A = ctx.L } // private val LD_B_A = Opcode(0x47,4,1,"LD B,A") { ctx => ctx.B = ctx.A } private val LD_B_B = Opcode(0x40,4,1,"LD B,B") { ctx => /*ctx.B = ctx.B*/ } private val LD_B_C = Opcode(0x41,4,1,"LD B,C") { ctx => ctx.B = ctx.C } private val LD_B_D = Opcode(0x42,4,1,"LD B,D") { ctx => ctx.B = ctx.D } private val LD_B_E = Opcode(0x43,4,1,"LD B,E") { ctx => ctx.B = ctx.E } private val LD_B_H = Opcode(0x44,4,1,"LD B,H") { ctx => ctx.B = ctx.H } private val LD_B_L = Opcode(0x45,4,1,"LD B,L") { ctx => ctx.B = ctx.L } // private val LD_C_A = Opcode(0x4F,4,1,"LD C,A") { ctx => ctx.C = ctx.A } private val LD_C_B = Opcode(0x48,4,1,"LD C,B") { ctx => ctx.C = ctx.B } private val LD_C_C = Opcode(0x49,4,1,"LD C,C") { ctx => /*ctx.C = ctx.C*/ } private val LD_C_D = Opcode(0x4A,4,1,"LD C,D") { ctx => ctx.C = ctx.D } private val LD_C_E = Opcode(0x4B,4,1,"LD C,E") { ctx => ctx.C = ctx.E } private val LD_C_H = Opcode(0x4C,4,1,"LD C,H") { ctx => ctx.C = ctx.H } private val LD_C_L = Opcode(0x4D,4,1,"LD C,L") { ctx => ctx.C = ctx.L } // private val LD_D_A = Opcode(0x57,4,1,"LD D,A") { ctx => ctx.D = ctx.A } private val LD_D_B = Opcode(0x50,4,1,"LD D,B") { ctx => ctx.D = ctx.B } private val LD_D_C = Opcode(0x51,4,1,"LD D,C") { ctx => ctx.D = ctx.C } private val LD_D_D = Opcode(0x52,4,1,"LD D,D") { ctx => /*ctx.D = ctx.D*/ } private val LD_D_E = Opcode(0x53,4,1,"LD D,E") { ctx => ctx.D = ctx.E } private val LD_D_H = Opcode(0x54,4,1,"LD D,H") { ctx => ctx.D = ctx.H } private val LD_D_L = Opcode(0x55,4,1,"LD D,L") { ctx => ctx.D = ctx.L } // private val LD_E_A = Opcode(0x5F,4,1,"LD E,A") { ctx => ctx.E = ctx.A } private val LD_E_B = Opcode(0x58,4,1,"LD E,B") { ctx => ctx.E = ctx.B } private val LD_E_C = Opcode(0x59,4,1,"LD E,C") { ctx => ctx.E = ctx.C } private val LD_E_D = Opcode(0x5A,4,1,"LD E,D") { ctx => ctx.E = ctx.D } private val LD_E_E = Opcode(0x5B,4,1,"LD E,E") { ctx => /*ctx.E = ctx.E*/ } private val LD_E_H = Opcode(0x5C,4,1,"LD E,H") { ctx => ctx.E = ctx.H } private val LD_E_L = Opcode(0x5D,4,1,"LD E,L") { ctx => ctx.E = ctx.L } // private val LD_H_A = Opcode(0x67,4,1,"LD H,A") { ctx => ctx.H = ctx.A } private val LD_H_B = Opcode(0x60,4,1,"LD H,B") { ctx => ctx.H = ctx.B } private val LD_H_C = Opcode(0x61,4,1,"LD H,C") { ctx => ctx.H = ctx.C } private val LD_H_D = Opcode(0x62,4,1,"LD H,D") { ctx => ctx.H = ctx.D } private val LD_H_E = Opcode(0x63,4,1,"LD H,E") { ctx => ctx.H = ctx.E } private val LD_H_H = Opcode(0x64,4,1,"LD H,H") { ctx => /*ctx.H = ctx.H*/ } 
private val LD_H_L = Opcode(0x65,4,1,"LD H,L") { ctx => ctx.H = ctx.L } // private val LD_L_A = Opcode(0x6F,4,1,"LD L,A") { ctx => ctx.L = ctx.A } private val LD_L_B = Opcode(0x68,4,1,"LD L,B") { ctx => ctx.L = ctx.B } private val LD_L_C = Opcode(0x69,4,1,"LD L,C") { ctx => ctx.L = ctx.C } private val LD_L_D = Opcode(0x6A,4,1,"LD L,D") { ctx => ctx.L = ctx.D } private val LD_L_E = Opcode(0x6B,4,1,"LD L,E") { ctx => ctx.L = ctx.E } private val LD_L_H = Opcode(0x6C,4,1,"LD L,H") { ctx => ctx.L = ctx.H } private val LD_L_L = Opcode(0x6D,4,1,"LD L,L") { ctx => /*ctx.L = ctx.L*/ } // *** LD r,n // ************** private def MNEMONIC_n(pattern:String,ofs:Int = 1) = (m:Memory,PC:Int) => pattern.format(hex2(m.read(PC + ofs))) private val LD_A_n = Opcode(0x3E,7,2,MNEMONIC_n("LD A,%s")) { ctx => ctx.A = ctx.byte(1) } private val LD_B_n = Opcode(0x06,7,2,MNEMONIC_n("LD B,%s")) { ctx => ctx.B = ctx.byte(1) } private val LD_C_n = Opcode(0x0E,7,2,MNEMONIC_n("LD C,%s")) { ctx => ctx.C = ctx.byte(1) } private val LD_D_n = Opcode(0x16,7,2,MNEMONIC_n("LD D,%s")) { ctx => ctx.D = ctx.byte(1) } private val LD_E_n = Opcode(0x1E,7,2,MNEMONIC_n("LD E,%s")) { ctx => ctx.E = ctx.byte(1) } private val LD_H_n = Opcode(0x26,7,2,MNEMONIC_n("LD H,%s")) { ctx => ctx.H = ctx.byte(1) } private val LD_L_n = Opcode(0x2E,7,2,MNEMONIC_n("LD L,%s")) { ctx => ctx.L = ctx.byte(1) } // *** LD r,(HL) // ************** private val LD_A_$HL$ = Opcode(0x7E,7,1,"LD A,(HL)") { ctx => ctx.A = ctx.read(ctx.HL) } private val LD_B_$HL$ = Opcode(0x46,7,1,"LD B,(HL)") { ctx => ctx.B = ctx.read(ctx.HL) } private val LD_C_$HL$ = Opcode(0x4E,7,1,"LD C,(HL)") { ctx => ctx.C = ctx.read(ctx.HL) } private val LD_D_$HL$ = Opcode(0x56,7,1,"LD D,(HL)") { ctx => ctx.D = ctx.read(ctx.HL) } private val LD_E_$HL$ = Opcode(0x5E,7,1,"LD E,(HL)") { ctx => ctx.E = ctx.read(ctx.HL) } private val LD_H_$HL$ = Opcode(0x66,7,1,"LD H,(HL)") { ctx => ctx.H = ctx.read(ctx.HL) } private val LD_L_$HL$ = Opcode(0x6E,7,1,"LD L,(HL)") { ctx => ctx.L = ctx.read(ctx.HL) } // *** LD A,(BC) // ************** private val LD_A_$BC$ = Opcode(0x0A,7,1,"LD A,(BC)") { ctx => ctx.A = ctx.read(ctx.BC) ctx.memptr = (ctx.BC + 1) & 0xFFFF } // *** LD A,(DE) // ************** private val LD_A_$DE$ = Opcode(0x1A,7,1,"LD A,(DE)") { ctx => ctx.A = ctx.read(ctx.DE) ctx.memptr = (ctx.DE + 1) & 0xFFFF } // *** LD r,(IX + d) // ************** private def MNEMONIC_IXY_d(pattern:String) = (m:Memory,PC:Int) => { val ofs = m.read(PC + 2).asInstanceOf[Byte] if (ofs > 0) pattern.format(" + " + hex2(ofs)) else pattern.format(" - " + hex2(-ofs)) } private val LD_A_$IX_d$ = Opcode((0xDD,0x7E),19,3,MNEMONIC_IXY_d("LD A,(IX %s)")) { ctx => ctx.A = ctx.read(ctx.INDEX_+(ctx.byte(2))) } private val LD_B_$IX_d$ = Opcode((0xDD,0x46),19,3,MNEMONIC_IXY_d("LD B,(IX %s)")) { ctx => ctx.B = ctx.read(ctx.INDEX_+(ctx.byte(2))) } private val LD_C_$IX_d$ = Opcode((0xDD,0x4E),19,3,MNEMONIC_IXY_d("LD C,(IX %s)")) { ctx => ctx.C = ctx.read(ctx.INDEX_+(ctx.byte(2))) } private val LD_D_$IX_d$ = Opcode((0xDD,0x56),19,3,MNEMONIC_IXY_d("LD D,(IX %s)")) { ctx => ctx.D = ctx.read(ctx.INDEX_+(ctx.byte(2))) } private val LD_E_$IX_d$ = Opcode((0xDD,0x5E),19,3,MNEMONIC_IXY_d("LD E,(IX %s)")) { ctx => ctx.E = ctx.read(ctx.INDEX_+(ctx.byte(2))) } private val LD_H_$IX_d$ = Opcode((0xDD,0x66),19,3,MNEMONIC_IXY_d("LD H,(IX %s)")) { ctx => ctx.H = ctx.read(ctx.INDEX_+(ctx.byte(2))) } private val LD_L_$IX_d$ = Opcode((0xDD,0x6E),19,3,MNEMONIC_IXY_d("LD L,(IX %s)")) { ctx => ctx.L = ctx.read(ctx.INDEX_+(ctx.byte(2))) } // *** LD A,(nn) // 
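// --------------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: the (IX + d) forms above
// treat the displacement byte as signed, which is what the `.asInstanceOf[Byte]`
// casts in INDEX_+ and MNEMONIC_IXY_d achieve. `signedDisplacement` and
// `indexedAddress` are hypothetical helper names used only for this example.
// --------------------------------------------------------------------------------
private object IndexedAddressingDemo {
  // Re-interpret an unsigned byte (0..255) as a signed displacement (-128..127).
  def signedDisplacement(d: Int): Int = d.toByte.toInt

  // Effective address of (IX + d), wrapped to 16 bits like the real INDEX_+ helper.
  def indexedAddress(ix: Int, d: Int): Int = (ix + signedDisplacement(d)) & 0xFFFF

  // 0xFE is -2, so IX=0x4000 with d=0xFE addresses 0x3FFE; d=0x05 addresses 0x4005.
  require(indexedAddress(0x4000, 0xFE) == 0x3FFE && indexedAddress(0x4000, 0x05) == 0x4005)
}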
************** private def MNEMONIC_nn(pattern:String,ofs:Int = 1) = (m:Memory,PC:Int) => pattern.format(hex4(WORD(m.read(PC + ofs + 1),m.read(PC + ofs)))) private val LD_A_$nn$ = Opcode(0x3A,13,3,MNEMONIC_nn("LD A,(%s)")) { ctx => val addr = ctx.word(1) ctx.A = ctx.read(addr) ctx.memptr = (addr + 1) & 0xFFFF } // *** LD (HL),r // ************** private val LD_$HL$_A = Opcode(0x77,7,1,"LD (HL),A") { ctx => ctx.write(ctx.HL,ctx.A) } private val LD_$HL$_B = Opcode(0x70,7,1,"LD (HL),B") { ctx => ctx.write(ctx.HL,ctx.B) } private val LD_$HL$_C = Opcode(0x71,7,1,"LD (HL),C") { ctx => ctx.write(ctx.HL,ctx.C) } private val LD_$HL$_D = Opcode(0x72,7,1,"LD (HL),D") { ctx => ctx.write(ctx.HL,ctx.D) } private val LD_$HL$_E = Opcode(0x73,7,1,"LD (HL),E") { ctx => ctx.write(ctx.HL,ctx.E) } private val LD_$HL$_H = Opcode(0x74,7,1,"LD (HL),H") { ctx => ctx.write(ctx.HL,ctx.H) } private val LD_$HL$_L = Opcode(0x75,7,1,"LD (HL),L") { ctx => ctx.write(ctx.HL,ctx.L) } // *** LD (HL),n // ************** private val LD_$HL$_n = Opcode(0x36,10,2,MNEMONIC_n("LD (HL),%s")) { ctx => ctx.write(ctx.HL,ctx.byte(1)) } // *** LD (BC),A // ************** private val LD_$BC$_A = Opcode(0x02,7,1,"LD (BC),A") { ctx => ctx.write(ctx.BC,ctx.A) ctx.memptr = (ctx.BC + 1) & 0xFF | ctx.A << 8 } // *** LD (DE),A // ************** private val LD_$DE$_A = Opcode(0x12,7,1,"LD (DE),A") { ctx => ctx.write(ctx.DE,ctx.A) ctx.memptr = (ctx.DE + 1) & 0xFF | ctx.A << 8 } // *** LD (IX + d),r // ************** private val LD_$IX_d$_A = Opcode((0xDD,0x77),19,3,MNEMONIC_IXY_d("LD (IX %s),A")) { ctx => ctx.write(ctx.INDEX_+(ctx.byte(2)),ctx.A) } private val LD_$IX_d$_B = Opcode((0xDD,0x70),19,3,MNEMONIC_IXY_d("LD (IX %s),B")) { ctx => ctx.write(ctx.INDEX_+(ctx.byte(2)),ctx.B) } private val LD_$IX_d$_C = Opcode((0xDD,0x71),19,3,MNEMONIC_IXY_d("LD (IX %s),C")) { ctx => ctx.write(ctx.INDEX_+(ctx.byte(2)),ctx.C) } private val LD_$IX_d$_D = Opcode((0xDD,0x72),19,3,MNEMONIC_IXY_d("LD (IX %s),D")) { ctx => ctx.write(ctx.INDEX_+(ctx.byte(2)),ctx.D) } private val LD_$IX_d$_E = Opcode((0xDD,0x73),19,3,MNEMONIC_IXY_d("LD (IX %s),E")) { ctx => ctx.write(ctx.INDEX_+(ctx.byte(2)),ctx.E) } private val LD_$IX_d$_H = Opcode((0xDD,0x74),19,3,MNEMONIC_IXY_d("LD (IX %s),H")) { ctx => ctx.write(ctx.INDEX_+(ctx.byte(2)),ctx.H) } private val LD_$IX_d$_L = Opcode((0xDD,0x75),19,3,MNEMONIC_IXY_d("LD (IX %s),L")) { ctx => ctx.write(ctx.INDEX_+(ctx.byte(2)),ctx.L) } // *** LD (IX + d),n // ************** private def MNEMONIC_IXY_d_n(pattern:String) = (m:Memory,PC:Int) => { val ofs = m.read(PC + 2).asInstanceOf[Byte] val n = hex2(m.read(PC + 3)) if (ofs > 0) pattern.format("+ " + hex2(ofs),n) else pattern.format("- " + hex2(-ofs),n) } private val LD_$IX_d$_n = Opcode((0xDD,0x36),19,4,MNEMONIC_IXY_d_n("LD (IX %s),%s")) { ctx => val ofs = ctx.byte(2) val value = ctx.byte(3) ctx.io.internalOperation(2,ctx.PC) ctx.write(ctx.INDEX_+(ofs,false),value) } // *** LD (nn),A // ************** private val LD_$nn$_A = Opcode(0x32,13,3,MNEMONIC_nn("LD (%s),A")) { ctx => val addr = ctx.word(1) ctx.write(addr,ctx.A) ctx.memptr = (addr + 1) & 0xFF | ctx.A << 8 } // *** LD I,A // ************** private val LD_I_A = Opcode((0xED,0x47),9,2,"LD I,A") { ctx => ctx.io.internalOperation(1,ctx.IR) ; ctx.I = ctx.A } // *** LD R,A // ************** private val LD_R_A = Opcode((0xED,0x4F),9,2,"LD R,A") { ctx => ctx.R = ctx.A ; ctx.io.internalOperation(1,ctx.IR) } // =========================================== LOAD 16 bit ================================================= // *** LD dd,nn // 
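// --------------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: Z80 16-bit operands are
// stored little-endian, which is why `word`, `readW` and `writeW` above combine the
// low byte first and the high byte second. `demoMem`, `writeWordLE` and `readWordLE`
// are hypothetical names used only for this round-trip check.
// --------------------------------------------------------------------------------
private object LittleEndianDemo {
  private val demoMem = Array.fill(0x10000)(0) // hypothetical 64K byte array

  def writeWordLE(addr: Int, value: Int): Unit = {
    demoMem(addr & 0xFFFF) = value & 0xFF              // low byte first
    demoMem((addr + 1) & 0xFFFF) = (value >> 8) & 0xFF // high byte second
  }

  def readWordLE(addr: Int): Int =
    demoMem(addr & 0xFFFF) | (demoMem((addr + 1) & 0xFFFF) << 8)

  // Writing 0xBEEF at 0x1234 stores 0xEF,0xBE and reads back as 0xBEEF.
  writeWordLE(0x1234, 0xBEEF)
  require(demoMem(0x1234) == 0xEF && demoMem(0x1235) == 0xBE && readWordLE(0x1234) == 0xBEEF)
}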
************** private val LD_BC_nn = Opcode(0x01,10,3,MNEMONIC_nn("LD BC,%s")) { ctx => ctx.BC = ctx.word(1) } private val LD_DE_nn = Opcode(0x11,10,3,MNEMONIC_nn("LD DE,%s")) { ctx => ctx.DE = ctx.word(1) } private val LD_HL_nn = Opcode(0x21,10,3,MNEMONIC_nn("LD HL,%s")) { ctx => ctx.HL = ctx.word(1) } private val LD_SP_nn = Opcode(0x31,10,3,MNEMONIC_nn("LD SP,%s")) { ctx => ctx.SP = ctx.word(1) } // *** LD IX,nn // ************** private val LD_IX_nn = Opcode((0xDD,0x21),14,4,MNEMONIC_nn("LD IX,%s",2)) { ctx => ctx.INDEX = ctx.word(2) } // UNDOCUMENTED // *** LD IXH,n // ************** private val LD_IXH_n = Opcode((0xDD,0x26),11,3,MNEMONIC_nn("LD IXH,%s",2)) { ctx => ctx.INDEX_H = ctx.byte(2) } // UNDOCUMENTED // *** LD IXL,n // ************** private val LD_IXL_n = Opcode((0xDD,0x2E),11,3,MNEMONIC_n("LD IXL,%s",2)) { ctx => ctx.INDEX_L = ctx.byte(2) } // UNDOCUMENTED // *** LD B,IXH // ************** private val LD_B_IXH = Opcode((0xDD,0x44),8,2,"LD B,IXH") { ctx => ctx.B = ctx.INDEX_H } // UNDOCUMENTED // *** LD B,IXL // ************** private val LD_B_IXL = Opcode((0xDD,0x45),8,2,"LD B,IXL") { ctx => ctx.B = ctx.INDEX_L } // UNDOCUMENTED // *** LD C,IXH // ************** private val LD_C_IXH = Opcode((0xDD,0x4C),8,2,"LD C,IXH") { ctx => ctx.C = ctx.INDEX_H } // UNDOCUMENTED // *** LD C,IXL // ************** private val LD_C_IXL = Opcode((0xDD,0x4D),8,2,"LD C,IXL") { ctx => ctx.C = ctx.INDEX_L } // UNDOCUMENTED // *** LD D,IXH // ************** private val LD_D_IXH = Opcode((0xDD,0x54),8,2,"LD D,IXH") { ctx => ctx.D = ctx.INDEX_H } // UNDOCUMENTED // *** LD B,IXL // ************** private val LD_D_IXL = Opcode((0xDD,0x55),8,2,"LD D,IXL") { ctx => ctx.D = ctx.INDEX_L } // UNDOCUMENTED // *** LD E,IXH // ************** private val LD_E_IXH = Opcode((0xDD,0x5C),8,2,"LD E,IXH") { ctx => ctx.E = ctx.INDEX_H } // UNDOCUMENTED // *** LD C,IXL // ************** private val LD_E_IXL = Opcode((0xDD,0x5D),8,2,"LD E,IXL") { ctx => ctx.E = ctx.INDEX_L } // UNDOCUMENTED // *** LD IXH,B // ************** private val LD_IXH_B = Opcode((0xDD,0x60),8,2,"LD IXH,B") { ctx => ctx.INDEX_H = ctx.B } // UNDOCUMENTED // *** LD IXH,C // ************** private val LD_IXH_C = Opcode((0xDD,0x61),8,2,"LD IXH,C") { ctx => ctx.INDEX_H = ctx.C } // UNDOCUMENTED // *** LD IXH,D // ************** private val LD_IXH_D = Opcode((0xDD,0x62),8,2,"LD IXH,D") { ctx => ctx.INDEX_H = ctx.D } // UNDOCUMENTED // *** LD IXH,E // ************** private val LD_IXH_E = Opcode((0xDD,0x63),8,2,"LD IXH,E") { ctx => ctx.INDEX_H = ctx.E } // UNDOCUMENTED // *** LD IXH,IXH // ************** private val LD_IXH_IXH = Opcode((0xDD,0x64),8,2,"LD IXH,IXH") { ctx => /*ctx.INDEX_H = ctx.INDEX_H*/ } // UNDOCUMENTED // *** LD IXH,IXL // ************** private val LD_IXH_IXL = Opcode((0xDD,0x65),8,2,"LD IXH,IXL") { ctx => ctx.INDEX_H = ctx.INDEX_L } // UNDOCUMENTED // *** LD IXH,A // ************** private val LD_IXH_A = Opcode((0xDD,0x67),8,2,"LD IXH,A") { ctx => ctx.INDEX_H = ctx.A } // UNDOCUMENTED // *** LD IXL,B // ************** private val LD_IXL_B = Opcode((0xDD,0x68),8,2,"LD IXL,B") { ctx => ctx.INDEX_L = ctx.B } // UNDOCUMENTED // *** LD IXL,C // ************** private val LD_IXL_C = Opcode((0xDD,0x69),8,2,"LD IXL,C") { ctx => ctx.INDEX_L = ctx.C } // UNDOCUMENTED // *** LD IXL,D // ************** private val LD_IXL_D = Opcode((0xDD,0x6A),8,2,"LD IXL,D") { ctx => ctx.INDEX_L = ctx.D } // UNDOCUMENTED // *** LD IXL,E // ************** private val LD_IXL_E = Opcode((0xDD,0x6B),8,2,"LD IXL,E") { ctx => ctx.INDEX_L = ctx.E } // UNDOCUMENTED 
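// --------------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: the undocumented IXH/IXL
// loads above simply view the 16-bit index register as two independent bytes.
// `withHigh`/`withLow` below repeat the masking done by IXH_= and IXL_= under
// hypothetical names, purely to show the bit layout.
// --------------------------------------------------------------------------------
private object IndexHalvesDemo {
  def withHigh(ix: Int, value: Int): Int = ((value & 0xFF) << 8) | (ix & 0xFF) // replace IXH
  def withLow(ix: Int, value: Int): Int  = (ix & 0xFF00) | (value & 0xFF)      // replace IXL

  // Setting IXH=0x12 then IXL=0x34 on a cleared register yields IX=0x1234.
  require(withLow(withHigh(0x0000, 0x12), 0x34) == 0x1234)
}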
// *** LD IXL,IXH // ************** private val LD_IXL_IXH = Opcode((0xDD,0x6C),8,2,"LD IXL,IXH") { ctx => ctx.INDEX_L = ctx.INDEX_H } // UNDOCUMENTED // *** LD IXL,IXL // ************** private val LD_IXL_IXL = Opcode((0xDD,0x6D),8,2,"LD IXL,IXL") { ctx => /*ctx.INDEX_L = ctx.INDEX_L*/ } // UNDOCUMENTED // *** LD IXL,A // ************** private val LD_IXL_A = Opcode((0xDD,0x6F),8,2,"LD IXL,A") { ctx => ctx.INDEX_L = ctx.A } // UNDOCUMENTED // *** LD A,IXH // ************** private val LD_A_IXH = Opcode((0xDD,0x7C),8,2,"LD A,IXH") { ctx => ctx.A = ctx.INDEX_H } // UNDOCUMENTED // *** LD A,IXL // ************** private val LD_A_IXL = Opcode((0xDD,0x7D),8,2,"LD A,IXL") { ctx => ctx.A = ctx.INDEX_L } // *** LD dd,(nn) // ************** private val LD_BC_$nn$ = Opcode((0xED,0x4B),20,4,MNEMONIC_nn("LD BC,(%s)",2)) { ctx => val addr = ctx.word(2) ctx.BC = ctx.readW(addr) ctx.memptr = (addr + 1) & 0xFFFF } private val LD_DE_$nn$ = Opcode((0xED,0x5B),20,4,MNEMONIC_nn("LD DE,(%s)",2)) { ctx => val addr = ctx.word(2) ctx.DE = ctx.readW(addr) ctx.memptr = (addr + 1) & 0xFFFF } private val LD_HL_$nn$ = Opcode((0xED,0x6B),20,4,MNEMONIC_nn("LD HL,(%s)",2)) { ctx => val addr = ctx.word(2) ctx.HL = ctx.readW(addr) ctx.memptr = (addr + 1) & 0xFFFF } private val LD_SP_$nn$ = Opcode((0xED,0x7B),20,4,MNEMONIC_nn("LD SP,(%s)",2)) { ctx => val addr = ctx.word(2) ctx.SP = ctx.readW(addr) ctx.memptr = (addr + 1) & 0xFFFF } // *** LD HL,(nn) // ************** private val LD2_HL_$nn$ = Opcode(0x2A,16,3,MNEMONIC_nn("LD HL,(%s)")) { ctx => val addr = ctx.word(1) ctx.HL = ctx.readW(addr) ctx.memptr = (addr + 1) & 0xFFFF } // *** LD IX,(nn) // ************** private val LD_IX_$nn$ = Opcode((0xDD,0x2A),20,4,MNEMONIC_nn("LD IX,(%s)",2)) { ctx => val addr = ctx.word(2) ctx.INDEX = ctx.readW(addr) ctx.memptr = (addr + 1) & 0xFFFF } // *** LD SP,HL // ************** private val LD_SP_HL = Opcode(0xF9,6,1,"LD SP,HL") { ctx => ctx.io.internalOperation(2,ctx.IR) ; ctx.SP = ctx.HL } // *** LD SP,IX // ************** private val LD_SP_IX = Opcode((0xDD,0xF9),10,2,"LD SP,IX") { ctx => ctx.io.internalOperation(2,ctx.IR) ; ctx.SP = ctx.INDEX } // *** LD (nn),dd // ************** private val LD_$nn$_BC = Opcode((0xED,0x43),20,4,MNEMONIC_nn("LD (%s),BC",2)) { ctx => val addr = ctx.word(2) ctx.writeW(addr,ctx.BC) ctx.memptr = addr + 1 } private val LD_$nn$_DE = Opcode((0xED,0x53),20,4,MNEMONIC_nn("LD (%s),DE",2)) { ctx => val addr = ctx.word(2) ctx.writeW(addr,ctx.DE) ctx.memptr = addr + 1 } private val LD_$nn$_HL = Opcode((0xED,0x63),20,4,MNEMONIC_nn("LD (%s),HL",2)) { ctx => val addr = ctx.word(2) ctx.writeW(addr,ctx.HL) ctx.memptr = addr + 1 } private val LD_$nn$_SP = Opcode((0xED,0x73),20,4,MNEMONIC_nn("LD (%s),SP",2)) { ctx => val addr = ctx.word(2) ctx.writeW(addr,ctx.SP) ctx.memptr = addr + 1 } private val LD_$nn$_IX = Opcode((0xDD,0x22),20,4,MNEMONIC_nn("LD (%s),IX",2)) { ctx => val addr = ctx.word(2) ctx.writeW(addr,ctx.INDEX) ctx.memptr = addr + 1 } // *** LD (nn),HL // ************** private val LD2_$nn$_$HL$ = Opcode(0x22,16,3,MNEMONIC_nn("LD (%s),HL")) { ctx => val addr = ctx.word(1) ctx.writeW(addr,ctx.HL) ctx.memptr = addr + 1 } // *** PUSH AF // ************** private val PUSH_AF = Opcode(0xF5,11,1,"PUSH AF") { ctx => ctx.io.internalOperation(1,ctx.IR) ; ctx.push(ctx.AF) } // *** PUSH BC // ************** private val PUSH_BC = Opcode(0xC5,11,1,"PUSH BC") { ctx => ctx.io.internalOperation(1,ctx.IR) ; ctx.push(ctx.BC) } // *** PUSH DE // ************** private val PUSH_DE = Opcode(0xD5,11,1,"PUSH DE") { ctx => 
ctx.io.internalOperation(1,ctx.IR) ; ctx.push(ctx.DE) } // *** PUSH HL // ************** private val PUSH_HL = Opcode(0xE5,11,1,"PUSH HL") { ctx => ctx.io.internalOperation(1,ctx.IR) ; ctx.push(ctx.HL) } // *** PUSH IX // ************** private val PUSH_IX = Opcode((0xDD,0xE5),15,2,"PUSH IX") { ctx => ctx.io.internalOperation(1,ctx.IR) ; ctx.push(ctx.INDEX) } // *** POP AF // ************** private val POP_AF = Opcode(0xF1,10,1,"POP AF") { ctx => ctx.AF = ctx.pop } // *** POP BC // ************** private val POP_BC = Opcode(0xC1,10,1,"POP BC") { ctx => ctx.BC = ctx.pop } // *** POP DE // ************** private val POP_DE = Opcode(0xD1,10,1,"POP DE") { ctx => ctx.DE = ctx.pop } // *** POP HL // ************** private val POP_HL = Opcode(0xE1,10,1,"POP HL") { ctx => ctx.HL = ctx.pop } // *** POP IX // ************** private val POP_IX = Opcode((0xDD,0xE1),14,2,"POP IX") { ctx => ctx.INDEX = ctx.pop } // ======================================= Exchange ======================================================== // *** EXX // ************** private val EXX = Opcode(0xD9,4,1,"EXX") { ctx => ctx.EXX } // *** EX DE,HL // ************** private val EX_DE_HL = Opcode(0xEB,4,1,"EX DE,HL") { ctx => ctx.EX_DE_HL } // *** EX AF,AF' // ************** private val EX_AF_AF1 = Opcode(0x08,4,1,"EX AF,AF'") { ctx => ctx.EX_AF } // *** EX (SP),HL // ************** private val EX_$SP$_HL = Opcode(0xE3,19,1,"EX (SP),HL") { ctx => ctx.EX_SP_HL } // *** EX (SP),IX // ************** private val EX_$SP$_IX = Opcode((0xDD,0xE3),23,2,"EX (SP),IX") { ctx => ctx.EX_SP_IX } // ======================================= Block Transfer ================================================== // *** LDI // ************** private val LDI = Opcode((0xED,0xA0),16,2,"LDI") { ctx => ctx.ldi } // *** LDIR // ************** private val LDIR = Opcode((0xED,0xB0),21,2,"LDIR",modifyPC = true) { ctx => ctx.ldir } // *** LDD // ************** private val LDD = Opcode((0xED,0xA8),16,2,"LDD") { ctx => ctx.ldd } // *** LDDR // ************** private val LDDR = Opcode((0xED,0xB8),21,2,"LDDR",modifyPC = true) { ctx => ctx.lddr } // *** CPI // ************** private val CPI = Opcode((0xED,0xA1),16,2,"CPI") { ctx => ctx.cpi } // *** CPIR // ************** private val CPIR = Opcode((0xED,0xB1),21,2,"CPIR",modifyPC = true) { ctx => ctx.cpir } // *** CPDR // ************** private val CPDR = Opcode((0xED,0xB9),21,2,"CPDR",modifyPC = true) { ctx => ctx.cpdr } // *** CPD // ************** private val CPD = Opcode((0xED,0xA9),16,2,"CPD") { ctx => ctx.cpd } // ===================================== 8 bit arithmetic ================================================== // *** ADD A,r // ************** private val ADD_A_A = Opcode(0x87,4,1,"ADD A,A") { ctx => ctx.add(ctx.A) } private val ADD_A_B = Opcode(0x80,4,1,"ADD A,B") { ctx => ctx.add(ctx.B) } private val ADD_A_C = Opcode(0x81,4,1,"ADD A,C") { ctx => ctx.add(ctx.C) } private val ADD_A_D = Opcode(0x82,4,1,"ADD A,D") { ctx => ctx.add(ctx.D) } private val ADD_A_E = Opcode(0x83,4,1,"ADD A,E") { ctx => ctx.add(ctx.E) } private val ADD_A_H = Opcode(0x84,4,1,"ADD A,H") { ctx => ctx.add(ctx.H) } private val ADD_A_L = Opcode(0x85,4,1,"ADD A,L") { ctx => ctx.add(ctx.L) } // *** ADD A,(HL) // ************** private val ADD_A_$HL$ = Opcode(0x86,7,1,"ADD A,(HL)") { ctx => ctx.add(ctx.read(ctx.HL)) } // *** ADD A,(IX + d) // ************** private val ADD_A_$IX_d$ = Opcode((0xDD,0x86),19,3,MNEMONIC_IXY_d("ADD A,(IX%s)")) { ctx => ctx.add(ctx.read(ctx.INDEX_+(ctx.byte(2)))) } // *** ADD A,n // ************** private val 
ADD_A_n = Opcode(0xC6,7,2,MNEMONIC_n("ADD A,%s")) { ctx => ctx.add(ctx.byte(1)) }
// *** ADC A,r
// **************
private val ADC_A_A = Opcode(0x8F,4,1,"ADC A,A") { ctx => ctx.adc(ctx.A) }
private val ADC_A_B = Opcode(0x88,4,1,"ADC A,B") { ctx => ctx.adc(ctx.B) }
private val ADC_A_C = Opcode(0x89,4,1,"ADC A,C") { ctx => ctx.adc(ctx.C) }
private val ADC_A_D = Opcode(0x8A,4,1,"ADC A,D") { ctx => ctx.adc(ctx.D) }
private val ADC_A_E = Opcode(0x8B,4,1,"ADC A,E") { ctx => ctx.adc(ctx.E) }
private val ADC_A_H = Opcode(0x8C,4,1,"ADC A,H") { ctx => ctx.adc(ctx.H) }
private val ADC_A_L = Opcode(0x8D,4,1,"ADC A,L") { ctx => ctx.adc(ctx.L) }
// *** ADC A,(HL)
// **************
private val ADC_A_$HL$ = Opcode(0x8E,7,1,"ADC A,(HL)") { ctx => ctx.adc(ctx.read(ctx.HL)) }
// *** ADC A,(IX + d)
// **************
private val ADC_A_$IX_d$ = Opcode((0xDD,0x8E),19,3,MNEMONIC_IXY_d("ADC A,(IX%s)")) { ctx => ctx.adc(ctx.read(ctx.INDEX_+(ctx.byte(2)))) }
// *** ADC A,n
// **************
private val ADC_A_n = Opcode(0xCE,7,2,MNEMONIC_n("ADC A,%s")) { ctx => ctx.adc(ctx.byte(1)) }
// UNDOCUMENTED
// *** ADC A,IXL
// **************
private val ADC_A_IXL = Opcode((0xDD,0x8D),8,2,"ADC IXL") { ctx => ctx.adc(ctx.INDEX_L) }
// UNDOCUMENTED
// *** ADC A,IXH
// **************
private val ADC_A_IXH = Opcode((0xDD,0x8C),8,2,"ADC IXH") { ctx => ctx.adc(ctx.INDEX_H) }
// UNDOCUMENTED
// *** ADD A,IXH
// **************
private val ADD_A_IXH = Opcode((0xDD,0x84),8,2,"ADD IXH") { ctx => ctx.add(ctx.INDEX_H) }
// UNDOCUMENTED
// *** ADD A,IXL
// **************
private val ADD_A_IXL = Opcode((0xDD,0x85),8,2,"ADD IXL") { ctx => ctx.add(ctx.INDEX_L) }
// *** SUB r
// **************
private val SUB_A = Opcode(0x97,4,1,"SUB A") { ctx => ctx.sub(ctx.A) }
private val SUB_B = Opcode(0x90,4,1,"SUB B") { ctx => ctx.sub(ctx.B) }
private val SUB_C = Opcode(0x91,4,1,"SUB C") { ctx => ctx.sub(ctx.C) }
private val SUB_D = Opcode(0x92,4,1,"SUB D") { ctx => ctx.sub(ctx.D) }
private val SUB_E = Opcode(0x93,4,1,"SUB E") { ctx => ctx.sub(ctx.E) }
private val SUB_H = Opcode(0x94,4,1,"SUB H") { ctx => ctx.sub(ctx.H) }
private val SUB_L = Opcode(0x95,4,1,"SUB L") { ctx => ctx.sub(ctx.L) }
// *** SUB (HL)
// **************
private val SUB_$HL$ = Opcode(0x96,7,1,"SUB (HL)") { ctx => ctx.sub(ctx.read(ctx.HL)) }
// *** SUB (IX + d)
// **************
private val SUB_$IX_d$ = Opcode((0xDD,0x96),19,3,MNEMONIC_IXY_d("SUB (IX%s)")) { ctx => ctx.sub(ctx.read(ctx.INDEX_+(ctx.byte(2)))) }
// *** SUB n
// **************
private val SUB_n = Opcode(0xD6,7,2,MNEMONIC_n("SUB %s")) { ctx => ctx.sub(ctx.byte(1)) }
// *** SBC r
// **************
private val SBC_A = Opcode(0x9F,4,1,"SBC A,A") { ctx => ctx.sbc(ctx.A) }
private val SBC_B = Opcode(0x98,4,1,"SBC A,B") { ctx => ctx.sbc(ctx.B) }
private val SBC_C = Opcode(0x99,4,1,"SBC A,C") { ctx => ctx.sbc(ctx.C) }
private val SBC_D = Opcode(0x9A,4,1,"SBC A,D") { ctx => ctx.sbc(ctx.D) }
private val SBC_E = Opcode(0x9B,4,1,"SBC A,E") { ctx => ctx.sbc(ctx.E) }
private val SBC_H = Opcode(0x9C,4,1,"SBC A,H") { ctx => ctx.sbc(ctx.H) }
private val SBC_L = Opcode(0x9D,4,1,"SBC A,L") { ctx => ctx.sbc(ctx.L) }
// *** SBC A,(HL)
// **************
private val SBC_A_$HL$ = Opcode(0x9E,7,1,"SBC A,(HL)") { ctx => ctx.sbc(ctx.read(ctx.HL)) }
// *** SBC A,(IX + d)
// **************
private val SBC_A_$IX_d$ = Opcode((0xDD,0x9E),19,3,MNEMONIC_IXY_d("SBC A,(IX%s)")) { ctx => ctx.sbc(ctx.read(ctx.INDEX_+(ctx.byte(2)))) }
// *** SBC n
// **************
private val SBC_n = Opcode(0xDE,7,2,MNEMONIC_n("SBC A,%s")) { ctx => ctx.sbc(ctx.byte(1)) }
//
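// --------------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: the ADD/ADC/SUB/SBC handlers
// above derive half-carry and signed overflow from XOR patterns instead of per-bit
// loops. `addFlags` is a hypothetical helper that restates those formulas for a
// plain 8-bit ADD, with two worked cases as a sanity check.
// --------------------------------------------------------------------------------
private object AddFlagsDemo {
  // Returns (result, halfCarry, overflow) for an 8-bit ADD without carry-in.
  def addFlags(a: Int, v: Int): (Int, Boolean, Boolean) = {
    val r = (a + v) & 0xFF
    val half     = ((a ^ v ^ r) & 0x10) > 0          // carry out of bit 3
    val overflow = ((~(a ^ v)) & (a ^ r) & 0x80) > 0 // operands agree in sign, result differs
    (r, half, overflow)
  }

  // 0x7F + 0x01 overflows (127 + 1 leaves the signed range); 0x0F + 0x01 only half-carries.
  require(addFlags(0x7F, 0x01) == (0x80, true, true))
  require(addFlags(0x0F, 0x01) == (0x10, true, false))
}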
UNDOCUMENTED // *** SUB IXH // ************** private val SUB_IXH = Opcode((0xDD,0x94),8,2,"SUB IXH") { ctx => ctx.sub(ctx.INDEX_H) } // UNDOCUMENTED // *** SUB IXL // ************** private val SUB_IXL = Opcode((0xDD,0x95),8,2,"SUB IXL") { ctx => ctx.sub(ctx.INDEX_L) } // UNDOCUMENTED // *** SBC IXH // ************** private val SBC_IXH = Opcode((0xDD,0x9C),8,2,"SBC IXH") { ctx => ctx.sbc(ctx.INDEX_H) } // UNDOCUMENTED // *** SBC IXL // ************** private val SBC_IXL = Opcode((0xDD,0x9D),8,2,"SBC IXL") { ctx => ctx.sbc(ctx.INDEX_L) } // *** AND r // ************** private val AND_A = Opcode(0xA7,4,1,"AND A") { ctx => ctx.and(ctx.A) } private val AND_B = Opcode(0xA0,4,1,"AND B") { ctx => ctx.and(ctx.B) } private val AND_B_dd = Opcode((0xDD,0xA0),4,2,"AND B") { ctx => ctx.and(ctx.B) } private val AND_C = Opcode(0xA1,4,1,"AND C") { ctx => ctx.and(ctx.C) } private val AND_D = Opcode(0xA2,4,1,"AND D") { ctx => ctx.and(ctx.D) } private val AND_E = Opcode(0xA3,4,1,"AND E") { ctx => ctx.and(ctx.E) } private val AND_H = Opcode(0xA4,4,1,"AND H") { ctx => ctx.and(ctx.H) } private val AND_L = Opcode(0xA5,4,1,"AND L") { ctx => ctx.and(ctx.L) } // UNDOCUMENTED // *** AND IXH private val AND_IXH = Opcode((0xDD,0xA4),8,2,"AND IXH") { ctx => ctx.and(ctx.INDEX_H) } // UNDOCUMENTED // *** AND IXL private val AND_IXL = Opcode((0xDD,0xA5),8,2,"AND IXL") { ctx => ctx.and(ctx.INDEX_L) } // ************** // *** AND (HL) // ************** private val AND_$HL$ = Opcode(0xA6,7,1,"AND (HL)") { ctx => ctx.and(ctx.read(ctx.HL)) } // *** AND (IX + d) // ************** private val AND_$IX_d$ = Opcode((0xDD,0xA6),19,3,MNEMONIC_IXY_d("AND (IX%s)")) { ctx => ctx.and(ctx.read(ctx.INDEX_+(ctx.byte(2)))) } // *** AND n // ************** private val AND_n = Opcode(0xE6,7,2,MNEMONIC_n("AND %s")) { ctx => ctx.and(ctx.byte(1)) } // *** XOR r // ************** private val XOR_A = Opcode(0xAF,4,1,"XOR A") { ctx => ctx.xor(ctx.A) } private val XOR_B = Opcode(0xA8,4,1,"XOR B") { ctx => ctx.xor(ctx.B) } private val XOR_C = Opcode(0xA9,4,1,"XOR C") { ctx => ctx.xor(ctx.C) } private val XOR_C_dd = Opcode((0xDD,0xA9),4,2,"XOR C") { ctx => ctx.xor(ctx.C) } private val XOR_D = Opcode(0xAA,4,1,"XOR D") { ctx => ctx.xor(ctx.D) } private val XOR_E = Opcode(0xAB,4,1,"XOR E") { ctx => ctx.xor(ctx.E) } private val XOR_H = Opcode(0xAC,4,1,"XOR H") { ctx => ctx.xor(ctx.H) } private val XOR_L = Opcode(0xAD,4,1,"XOR L") { ctx => ctx.xor(ctx.L) } // *** XOR (HL) // ************** private val XOR_$HL$ = Opcode(0xAE,7,1,"XOR (HL)") { ctx => ctx.xor(ctx.read(ctx.HL)) } // *** XOR (IX + d) // ************** private val XOR_$IX_d$ = Opcode((0xDD,0xAE),19,3,MNEMONIC_IXY_d("XOR (IX%s)")) { ctx => ctx.xor(ctx.read(ctx.INDEX_+(ctx.byte(2)))) } // *** XOR n // ************** private val XOR_n = Opcode(0xEE,7,2,MNEMONIC_n("XOR %s")) { ctx => ctx.xor(ctx.byte(1)) } // UNDOCUMENTED // *** XOR IXH private val XOR_IXH = Opcode((0xDD,0xAC),8,2,"XOR IXH") { ctx => ctx.xor(ctx.INDEX_H) } // UNDOCUMENTED // *** XOR IXL private val XOR_IXL = Opcode((0xDD,0xAD),8,2,"XOR IXL") { ctx => ctx.xor(ctx.INDEX_L) } // *** OR r // ************** private val OR_A = Opcode(0xB7,4,1,"OR A") { ctx => ctx.or(ctx.A) } private val OR_B = Opcode(0xB0,4,1,"OR B") { ctx => ctx.or(ctx.B) } private val OR_C = Opcode(0xB1,4,1,"OR C") { ctx => ctx.or(ctx.C) } private val OR_D = Opcode(0xB2,4,1,"OR D") { ctx => ctx.or(ctx.D) } private val OR_E = Opcode(0xB3,4,1,"OR E") { ctx => ctx.or(ctx.E) } private val OR_H = Opcode(0xB4,4,1,"OR H") { ctx => ctx.or(ctx.H) } private val OR_L = 
Opcode(0xB5,4,1,"OR L") { ctx => ctx.or(ctx.L) } // *** OR (HL) // ************** private val OR_$HL$ = Opcode(0xB6,7,1,"OR (HL)") { ctx => ctx.or(ctx.read(ctx.HL)) } // *** OR (IX + d) // ************** private val OR_$IX_d$ = Opcode((0xDD,0xB6),19,3,MNEMONIC_IXY_d("OR (IX%s)")) { ctx => ctx.or(ctx.read(ctx.INDEX_+(ctx.byte(2)))) } // *** OR n // ************** private val OR_n = Opcode(0xF6,7,2,MNEMONIC_n("OR %s")) { ctx => ctx.or(ctx.byte(1)) } // UNDOCUMENTED // *** OR IXH private val OR_IXH = Opcode((0xDD,0xB4),8,2,"OR IXH") { ctx => ctx.or(ctx.INDEX_H) } // UNDOCUMENTED // *** OR IXL private val OR_IXL = Opcode((0xDD,0xB5),8,2,"OR IXL") { ctx => ctx.or(ctx.INDEX_L) } // *** CP r // ************** private val CP_A = Opcode(0xBF,4,1,"CP A") { ctx => ctx.cp(ctx.A) } private val CP_B = Opcode(0xB8,4,1,"CP B") { ctx => ctx.cp(ctx.B) } private val CP_C = Opcode(0xB9,4,1,"CP C") { ctx => ctx.cp(ctx.C) } private val CP_D = Opcode(0xBA,4,1,"CP D") { ctx => ctx.cp(ctx.D) } private val CP_E = Opcode(0xBB,4,1,"CP E") { ctx => ctx.cp(ctx.E) } private val CP_H = Opcode(0xBC,4,1,"CP H") { ctx => ctx.cp(ctx.H) } private val CP_L = Opcode(0xBD,4,1,"CP L") { ctx => ctx.cp(ctx.L) } // *** CP (HL) // ************** private val CP_$HL$ = Opcode(0xBE,7,1,"CP (HL)") { ctx => ctx.cp(ctx.read(ctx.HL)) } // *** CP (IX + d) // ************** private val CP_$IX_d$ = Opcode((0xDD,0xBE),19,3,MNEMONIC_IXY_d("CP (IX%s)")) { ctx => ctx.cp(ctx.read(ctx.INDEX_+(ctx.byte(2)))) } // *** CP n // ************** private val CP_n = Opcode(0xFE,7,2,MNEMONIC_n("CP %s")) { ctx => ctx.cp(ctx.byte(1)) } // UNDOCUMENTED // *** CP IXH private val CP_IXH = Opcode((0xDD,0xBC),8,2,"CP IXH") { ctx => ctx.cp(ctx.INDEX_H) } // UNDOCUMENTED // *** CP IXL private val CP_IXL = Opcode((0xDD,0xBD),8,2,"CP IXL") { ctx => ctx.cp(ctx.INDEX_L) } // *** INC r // ************** @inline private def incdecFlags(ctx:Context,regValueAfter:Int,inc:Boolean) : Unit = { val carry = ctx.carry ctx.F = ctx.SZP(regValueAfter) | carry ; ctx.Q = true if (inc) { ctx.setParity(regValueAfter == 0x80) ctx.setHalf((regValueAfter & 0x0F) == 0) ctx.setNegative(false) } else { ctx.setParity(regValueAfter == 0x7F) ctx.setHalf((regValueAfter & 0x0F) == 0x0F) ctx.setNegative(true) } ctx.setXY(regValueAfter) } private val INC_A = Opcode(0x3C,4,1,"INC A") { ctx => ctx.A = (ctx.A + 1) & 0xFF ; incdecFlags(ctx,ctx.A,inc = true) } private val INC_B = Opcode(0x04,4,1,"INC B") { ctx => ctx.B = (ctx.B + 1) & 0xFF ; incdecFlags(ctx,ctx.B,inc = true) } private val INC_C = Opcode(0x0C,4,1,"INC C") { ctx => ctx.C = (ctx.C + 1) & 0xFF ; incdecFlags(ctx,ctx.C,inc = true) } private val INC_D = Opcode(0x14,4,1,"INC D") { ctx => ctx.D = (ctx.D + 1) & 0xFF ; incdecFlags(ctx,ctx.D,inc = true) } private val INC_E = Opcode(0x1C,4,1,"INC E") { ctx => ctx.E = (ctx.E + 1) & 0xFF ; incdecFlags(ctx,ctx.E,inc = true) } private val INC_H = Opcode(0x24,4,1,"INC H") { ctx => ctx.H = (ctx.H + 1) & 0xFF ; incdecFlags(ctx,ctx.H,inc = true) } private val INC_L = Opcode(0x2C,4,1,"INC L") { ctx => ctx.L = (ctx.L + 1) & 0xFF ; incdecFlags(ctx,ctx.L,inc = true) } // *** INC (HL) // ************** private val INC_$HL$ = Opcode(0x34,11,1,"INC (HL)") { ctx => val adr = ctx.HL val tmp = (ctx.read(adr) + 1) & 0xFF ctx.io.internalOperation(1,adr) ctx.write(adr,tmp) incdecFlags(ctx,tmp,inc = true) } // *** INC (IX + d) // ************** private val INC_$IX_d$ = Opcode((0xDD,0x34),23,3,MNEMONIC_IXY_d("INC (IX%s)")) { ctx => val addr = ctx.INDEX_+(ctx.byte(2)) ctx.io.internalOperation(1,addr) val tmp = (ctx.read(addr) 
+ 1) & 0xFF ctx.write(addr,tmp) ; incdecFlags(ctx,tmp,inc = true) } // UNDOCUMENTED // *** DEC IXH private val DEC_IXH = Opcode((0xDD,0x25),8,2,"DEC IXH") { ctx => ctx.INDEX_H = (ctx.INDEX_H - 1) & 0xFF ; incdecFlags(ctx,ctx.INDEX_H,inc = false) } // *** DEC IXL private val DEC_IXL = Opcode((0xDD,0x2D),8,2,"DEC IXL") { ctx => ctx.INDEX_L = (ctx.INDEX_L - 1) & 0xFF ; incdecFlags(ctx,ctx.INDEX_L,inc = false) } // UNDOCUMENTED // *** INC IXH private val INC_IXH = Opcode((0xDD,0x24),8,2,"INC IXH") { ctx => ctx.INDEX_H = (ctx.INDEX_H + 1) & 0xFF ; incdecFlags(ctx,ctx.INDEX_H,inc = true) } // *** INC IXL private val INC_IXL = Opcode((0xDD,0x2C),8,2,"INC IXL") { ctx => ctx.INDEX_L = (ctx.INDEX_L + 1) & 0xFF ; incdecFlags(ctx,ctx.INDEX_L,inc = true) } // *** DEC r // ************** private val DEC_A = Opcode(0x3D,4,1,"DEC A") { ctx => ctx.A = (ctx.A - 1) & 0xFF ; incdecFlags(ctx,ctx.A,inc = false) } private val DEC_B = Opcode(0x05,4,1,"DEC B") { ctx => ctx.B = (ctx.B - 1) & 0xFF ; incdecFlags(ctx,ctx.B,inc = false) } private val DEC_C = Opcode(0x0D,4,1,"DEC C") { ctx => ctx.C = (ctx.C - 1) & 0xFF ; incdecFlags(ctx,ctx.C,inc = false) } private val DEC_D = Opcode(0x15,4,1,"DEC D") { ctx => ctx.D = (ctx.D - 1) & 0xFF ; incdecFlags(ctx,ctx.D,inc = false) } private val DEC_E = Opcode(0x1D,4,1,"DEC E") { ctx => ctx.E = (ctx.E - 1) & 0xFF ; incdecFlags(ctx,ctx.E,inc = false) } private val DEC_H = Opcode(0x25,4,1,"DEC H") { ctx => ctx.H = (ctx.H - 1) & 0xFF ; incdecFlags(ctx,ctx.H,inc = false) } private val DEC_L = Opcode(0x2D,4,1,"DEC L") { ctx => ctx.L = (ctx.L - 1) & 0xFF ; incdecFlags(ctx,ctx.L,inc = false) } // *** DEC (HL) // ************** private val DEC_$HL$ = Opcode(0x35,11,1,"DEC (HL)") { ctx => val adr = ctx.HL val tmp = (ctx.read(adr) - 1) & 0xFF ctx.io.internalOperation(1,adr) ctx.write(adr,tmp) incdecFlags(ctx,tmp,inc = false) } // *** DEC (IX + d) // ************** private val DEC_$IX_d$ = Opcode((0xDD,0x35),23,3,MNEMONIC_IXY_d("DEC (IX%s)")) { ctx => val addr = ctx.INDEX_+(ctx.byte(2)) val tmp = (ctx.read(addr,1) - 1) & 0xFF ctx.write(addr,tmp) ; incdecFlags(ctx,tmp,inc = false) } // ==================================== 16 bit arithmetic ================================================== // *** ADD HL,rr // ************** private val ADD_HL_BC = Opcode(0x09,11,1,"ADD HL,BC") { ctx => ctx.addHLBC } private val ADD_HL_DE = Opcode(0x19,11,1,"ADD HL,DE") { ctx => ctx.addHLDE } private val ADD_HL_HL = Opcode(0x29,11,1,"ADD HL,HL") { ctx => ctx.addHLHL } private val ADD_HL_SP = Opcode(0x39,11,1,"ADD HL,SP") { ctx => ctx.addHLSP } private val ADD_IX_BC = Opcode((0xDD,0x09),15,2,"ADD IX,BC") { ctx => ctx.addIXBC } private val ADD_IX_DE = Opcode((0xDD,0x19),15,2,"ADD IX,DE") { ctx => ctx.addIXDE } private val ADD_IX_SP = Opcode((0xDD,0x39),15,2,"ADD IX,SP") { ctx => ctx.addIXSP } private val ADD_IX_IX = Opcode((0xDD,0x29),15,2,"ADD IX,IX") { ctx => ctx.addIXIX } // *** ADC HL,rr // ************** private val ADC_HL_BC = Opcode((0xED,0x4A),15,2,"ADC HL,BC") { ctx => ctx.adcHL(ctx.BC) } private val ADC_HL_DE = Opcode((0xED,0x5A),15,2,"ADC HL,DE") { ctx => ctx.adcHL(ctx.DE) } private val ADC_HL_HL = Opcode((0xED,0x6A),15,2,"ADC HL,HL") { ctx => ctx.adcHL(ctx.HL) } private val ADC_HL_SP = Opcode((0xED,0x7A),15,2,"ADC HL,SP") { ctx => ctx.adcHL(ctx.SP) } // *** SBC HL,rr // ************** private val SBC_HL_BC = Opcode((0xED,0x42),15,2,"SBC HL,BC") { ctx => ctx.sbcHL(ctx.BC) } private val SBC_HL_DE = Opcode((0xED,0x52),15,2,"SBC HL,DE") { ctx => ctx.sbcHL(ctx.DE) } private val SBC_HL_HL = 
Opcode((0xED,0x62),15,2,"SBC HL,HL") { ctx => ctx.sbcHL(ctx.HL) } private val SBC_HL_SP = Opcode((0xED,0x72),15,2,"SBC HL,SP") { ctx => ctx.sbcHL(ctx.SP) } // *** INC rr // ************** private val INC_BC = Opcode(0x03,6,1,"INC BC") { ctx => ctx.io.internalOperation(2,ctx.IR) ; ctx.incDecBC(true) } private val INC_DE = Opcode(0x13,6,1,"INC DE") { ctx => ctx.io.internalOperation(2,ctx.IR) ; ctx.incDecDE(true) } private val INC_HL = Opcode(0x23,6,1,"INC HL") { ctx => ctx.io.internalOperation(2,ctx.IR) ; ctx.incDecHL(true) } private val INC_SP = Opcode(0x33,6,1,"INC SP") { ctx => ctx.io.internalOperation(2,ctx.IR) ; ctx.incDecSP(true) } private val INC_IX = Opcode((0xDD,0x23),10,2,"INC IX") { ctx => ctx.io.internalOperation(2,ctx.IR) ; ctx.incDecIndex(true) } // *** DEC rr // ************** private val DEC_BC = Opcode(0x0B,6,1,"DEC BC") { ctx => ctx.io.internalOperation(2,ctx.IR) ; ctx.incDecBC(false) } private val DEC_DE = Opcode(0x1B,6,1,"DEC DE") { ctx => ctx.io.internalOperation(2,ctx.IR) ; ctx.incDecDE(false) } private val DEC_HL = Opcode(0x2B,6,1,"DEC HL") { ctx => ctx.io.internalOperation(2,ctx.IR) ; ctx.incDecHL(false) } private val DEC_SP = Opcode(0x3B,6,1,"DEC SP") { ctx => ctx.io.internalOperation(2,ctx.IR) ; ctx.incDecSP(false) } private val DEC_IX = Opcode((0xDD,0x2B),10,2,"DEC IX") { ctx => ctx.io.internalOperation(2,ctx.IR) ; ctx.incDecIndex(false) } // ==================================== General Purpose Arithmetic and Control Groups ====================== // *** NOP // ************** private val NOP = Opcode(0x00,4,1,"NOP") { ctx => } // *** HALT // ************** private val HALT = Opcode(0x76,4,1,"HALT",modifyPC = true) { ctx => ctx.halted = true } // *** EI // ************** private val EI = Opcode(0xFB,4,1,"EI") { ctx => ctx.IFF1 = 1 ; ctx.IFF2 = 1 ; ctx.setDelayInt(true) } // *** DI // ************** private val DI = Opcode(0xF3,4,1,"DI") { ctx => ctx.IFF1 = 0 ; ctx.IFF2 = 0 } // *** IM x // ************** private val IM_0 = Opcode((0xED,0x46),8,2,"IM 0",false,Array(0x4E,0x6E,0x66)) { ctx => ctx.im = 0 } private val IM_1 = Opcode((0xED,0x56),8,2,"IM 1",false,Array(0x76)) { ctx => ctx.im = 1 } private val IM_2 = Opcode((0xED,0x5E),8,2,"IM 2",false,Array(0x7E)) { ctx => ctx.im = 2 } // *** CPL // ************** private val CPL = Opcode(0x2F,4,1,"CPL") { ctx => ctx.cpl } // *** NEG // ************** private val NEG = Opcode((0xED,0x44),8,2,"NEG",false,Array(0x54,0x64,0x74,0x4C,0x5C,0x6C,0x7C)) { ctx => ctx.neg } // *** CCF // ************** private val CCF = Opcode(0x3F,4,1,"CCF") { ctx => ctx.ccf } // *** SCF // ************** private val SCF = Opcode(0x37,4,1,"SCF") { ctx => ctx.scf } // *** DAA // ************** private val DAA = Opcode(0x27,4,1,"DAA") { ctx => ctx.daa } // ==================================== Rotate and Shift Group ============================================= // *** RLC r // ************** private val RLC_A = Opcode((0xCB,0x07),8,2,"RLC A") { ctx => ctx.A = ctx.rotLC(ctx.A) } private val RLC_B = Opcode((0xCB,0x00),8,2,"RLC B") { ctx => ctx.B = ctx.rotLC(ctx.B) } private val RLC_C = Opcode((0xCB,0x01),8,2,"RLC C") { ctx => ctx.C = ctx.rotLC(ctx.C) } private val RLC_D = Opcode((0xCB,0x02),8,2,"RLC D") { ctx => ctx.D = ctx.rotLC(ctx.D) } private val RLC_E = Opcode((0xCB,0x03),8,2,"RLC E") { ctx => ctx.E = ctx.rotLC(ctx.E) } private val RLC_H = Opcode((0xCB,0x04),8,2,"RLC H") { ctx => ctx.H = ctx.rotLC(ctx.H) } private val RLC_L = Opcode((0xCB,0x05),8,2,"RLC L") { ctx => ctx.L = ctx.rotLC(ctx.L) } // *** RLC (HL) // ************** private val RLC_$HL$ = 
Opcode((0xCB,0x06),15,2,"RLC (HL)") { ctx => val adr = ctx.HL val tmp = ctx.read(ctx.HL) ctx.io.internalOperation(1,adr) ctx.write(adr,ctx.rotLC(tmp)) } // *** RLC (IX+d) // ************** private val RLC_$IX_d$ = Opcode((0xDD,0xCB,0x06),23,4,MNEMONIC_IXY_d("RLC (IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) val tmp = ctx.read(adr) ctx.io.internalOperation(1,adr) ctx.write(adr,ctx.rotLC(tmp)) } // *** RRC r // ************** private val RRC_A = Opcode((0xCB,0x0F),8,2,"RRC A") { ctx => ctx.A = ctx.rotRC(ctx.A) } private val RRC_B = Opcode((0xCB,0x08),8,2,"RRC B") { ctx => ctx.B = ctx.rotRC(ctx.B) } private val RRC_C = Opcode((0xCB,0x09),8,2,"RRC C") { ctx => ctx.C = ctx.rotRC(ctx.C) } private val RRC_D = Opcode((0xCB,0x0A),8,2,"RRC D") { ctx => ctx.D = ctx.rotRC(ctx.D) } private val RRC_E = Opcode((0xCB,0x0B),8,2,"RRC E") { ctx => ctx.E = ctx.rotRC(ctx.E) } private val RRC_H = Opcode((0xCB,0x0C),8,2,"RRC H") { ctx => ctx.H = ctx.rotRC(ctx.H) } private val RRC_L = Opcode((0xCB,0x0D),8,2,"RRC L") { ctx => ctx.L = ctx.rotRC(ctx.L) } // *** RRC (HL) // ************** private val RRC_$HL$ = Opcode((0xCB,0x0E),15,2,"RRC (HL)") { ctx => val adr = ctx.HL val tmp = ctx.read(ctx.HL) ctx.io.internalOperation(1,adr) ctx.write(adr,ctx.rotRC(tmp)) } // *** RRC (IX+d) // ************** private val RRC_$IX_d$ = Opcode((0xDD,0xCB,0x0E),23,4,MNEMONIC_IXY_d("RRC (IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) val tmp = ctx.read(adr) ctx.io.internalOperation(1,adr) ctx.write(adr,ctx.rotRC(tmp)) } // *** RL r // ************** private val RL_A = Opcode((0xCB,0x17),8,2,"RL A") { ctx => ctx.A = ctx.rotL(ctx.A) } private val RL_B = Opcode((0xCB,0x10),8,2,"RL B") { ctx => ctx.B = ctx.rotL(ctx.B) } private val RL_C = Opcode((0xCB,0x11),8,2,"RL C") { ctx => ctx.C = ctx.rotL(ctx.C) } private val RL_D = Opcode((0xCB,0x12),8,2,"RL D") { ctx => ctx.D = ctx.rotL(ctx.D) } private val RL_E = Opcode((0xCB,0x13),8,2,"RL E") { ctx => ctx.E = ctx.rotL(ctx.E) } private val RL_H = Opcode((0xCB,0x14),8,2,"RL H") { ctx => ctx.H = ctx.rotL(ctx.H) } private val RL_L = Opcode((0xCB,0x15),8,2,"RL L") { ctx => ctx.L = ctx.rotL(ctx.L) } // *** RL (HL) // ************** private val RL_$HL$ = Opcode((0xCB,0x16),15,2,"RL (HL)") { ctx => val adr = ctx.HL val tmp = ctx.read(ctx.HL) ctx.io.internalOperation(1,adr) ctx.write(adr,ctx.rotL(tmp)) } // *** RL (IX+d) // ************** private val RL_$IX_d$ = Opcode((0xDD,0xCB,0x16),23,4,MNEMONIC_IXY_d("RL (IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) val tmp = ctx.read(adr) ctx.io.internalOperation(1,adr) ctx.write(adr,ctx.rotL(tmp)) } // *** RR r // ************** private val RR_A = Opcode((0xCB,0x1F),8,2,"RR A") { ctx => ctx.A = ctx.rotR(ctx.A) } private val RR_B = Opcode((0xCB,0x18),8,2,"RR B") { ctx => ctx.B = ctx.rotR(ctx.B) } private val RR_C = Opcode((0xCB,0x19),8,2,"RR C") { ctx => ctx.C = ctx.rotR(ctx.C) } private val RR_D = Opcode((0xCB,0x1A),8,2,"RR D") { ctx => ctx.D = ctx.rotR(ctx.D) } private val RR_E = Opcode((0xCB,0x1B),8,2,"RR E") { ctx => ctx.E = ctx.rotR(ctx.E) } private val RR_H = Opcode((0xCB,0x1C),8,2,"RR H") { ctx => ctx.H = ctx.rotR(ctx.H) } private val RR_L = Opcode((0xCB,0x1D),8,2,"RR L") { ctx => ctx.L = ctx.rotR(ctx.L) } // *** RR (HL) // ************** private val RR_$HL$ = Opcode((0xCB,0x1E),15,2,"RR (HL)") { ctx => val adr = ctx.HL val tmp = ctx.read(ctx.HL) ctx.io.internalOperation(1,adr) ctx.write(adr,ctx.rotR(tmp)) } // *** RR (IX+d) // ************** private val RR_$IX_d$ = Opcode((0xDD,0xCB,0x1E),23,4,MNEMONIC_IXY_d("RR (IX%s)")) { ctx => val 
adr = ctx.INDEX_+(ctx.byte(2)) val tmp = ctx.read(adr) ctx.io.internalOperation(1,adr) ctx.write(adr,ctx.rotR(tmp)) } // *** SLA r // ************** private val SLA_A = Opcode((0xCB,0x27),8,2,"SLA A") { ctx => ctx.A = ctx.sla(ctx.A) } private val SLA_B = Opcode((0xCB,0x20),8,2,"SLA B") { ctx => ctx.B = ctx.sla(ctx.B) } private val SLA_C = Opcode((0xCB,0x21),8,2,"SLA C") { ctx => ctx.C = ctx.sla(ctx.C) } private val SLA_D = Opcode((0xCB,0x22),8,2,"SLA D") { ctx => ctx.D = ctx.sla(ctx.D) } private val SLA_E = Opcode((0xCB,0x23),8,2,"SLA E") { ctx => ctx.E = ctx.sla(ctx.E) } private val SLA_H = Opcode((0xCB,0x24),8,2,"SLA H") { ctx => ctx.H = ctx.sla(ctx.H) } private val SLA_L = Opcode((0xCB,0x25),8,2,"SLA L") { ctx => ctx.L = ctx.sla(ctx.L) } // *** SLA (HL) // ************** private val SLA_$HL$ = Opcode((0xCB,0x26),15,2,"SLA (HL)") { ctx => val adr = ctx.HL val tmp = ctx.read(ctx.HL) ctx.io.internalOperation(1,adr) ctx.write(adr,ctx.sla(tmp)) } // *** SLA (IX+d) // ************** private val SLA_$IX_d$ = Opcode((0xDD,0xCB,0x26),23,4,MNEMONIC_IXY_d("SLA (IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) val tmp = ctx.read(adr) ctx.io.internalOperation(1,adr) ctx.write(adr,ctx.sla(tmp)) } // UNDOCUMENTED // SLL r private val SLL_B = Opcode((0xCB,0x30),8,2,"SLL B") { ctx => ctx.B = ctx.sla(ctx.B, 1) } private val SLL_C = Opcode((0xCB,0x31),8,2,"SLL C") { ctx => ctx.C = ctx.sla(ctx.C, 1) } private val SLL_D = Opcode((0xCB,0x32),8,2,"SLL D") { ctx => ctx.D = ctx.sla(ctx.D, 1) } private val SLL_E = Opcode((0xCB,0x33),8,2,"SLL E") { ctx => ctx.E = ctx.sla(ctx.E, 1) } private val SLL_H = Opcode((0xCB,0x34),8,2,"SLL H") { ctx => ctx.H = ctx.sla(ctx.H, 1) } private val SLL_L = Opcode((0xCB,0x35),8,2,"SLL L") { ctx => ctx.L = ctx.sla(ctx.L, 1) } // UNDOCUMENTED // SLL (HL) private val SLL_$HL$ = Opcode((0xCB,0x36),15,2,"SLL (HL)") { ctx => val adr = ctx.HL val tmp = ctx.read(ctx.HL) ctx.io.internalOperation(1,adr) ctx.write(adr,ctx.sla(tmp, 1)) } // UNDOCUMENTED // SLL A private val SLL_A = Opcode((0xCB,0x37),8,2,"SLL A") { ctx => ctx.A = ctx.sla(ctx.A, 1) } // UNDOCUMENTED // *** SLL (IX+d) // ************** private val SLL_$IX_d$ = Opcode((0xDD,0xCB,0x36),23,4,MNEMONIC_IXY_d("SLL (IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) val tmp = ctx.read(adr) ctx.io.internalOperation(1,adr) ctx.write(adr,ctx.sla(tmp, 1)) } // *** SRA r // ************** private val SRA_A = Opcode((0xCB,0x2F),8,2,"SRA A") { ctx => ctx.A = ctx.sra(ctx.A) } private val SRA_B = Opcode((0xCB,0x28),8,2,"SRA B") { ctx => ctx.B = ctx.sra(ctx.B) } private val SRA_C = Opcode((0xCB,0x29),8,2,"SRA C") { ctx => ctx.C = ctx.sra(ctx.C) } private val SRA_D = Opcode((0xCB,0x2A),8,2,"SRA D") { ctx => ctx.D = ctx.sra(ctx.D) } private val SRA_E = Opcode((0xCB,0x2B),8,2,"SRA E") { ctx => ctx.E = ctx.sra(ctx.E) } private val SRA_H = Opcode((0xCB,0x2C),8,2,"SRA H") { ctx => ctx.H = ctx.sra(ctx.H) } private val SRA_L = Opcode((0xCB,0x2D),8,2,"SRA L") { ctx => ctx.L = ctx.sra(ctx.L) } // *** SRA (HL) // ************** private val SRA_$HL$ = Opcode((0xCB,0x2E),15,2,"SRA (HL)") { ctx => val adr = ctx.HL val tmp = ctx.read(ctx.HL) ctx.io.internalOperation(1,adr) ctx.write(adr,ctx.sra(tmp)) } // *** SRA (IX+d) // ************** private val SRA_$IX_d$ = Opcode((0xDD,0xCB,0x2E),23,4,MNEMONIC_IXY_d("SRA (IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) val tmp = ctx.read(adr) ctx.io.internalOperation(1,adr) ctx.write(adr,ctx.sra(tmp)) } // *** SRL r // ************** private val SRL_A = Opcode((0xCB,0x3F),8,2,"SRL A") { ctx => ctx.A = 
ctx.srl(ctx.A) } private val SRL_B = Opcode((0xCB,0x38),8,2,"SRL B") { ctx => ctx.B = ctx.srl(ctx.B) } private val SRL_C = Opcode((0xCB,0x39),8,2,"SRL C") { ctx => ctx.C = ctx.srl(ctx.C) } private val SRL_D = Opcode((0xCB,0x3A),8,2,"SRL D") { ctx => ctx.D = ctx.srl(ctx.D) } private val SRL_E = Opcode((0xCB,0x3B),8,2,"SRL E") { ctx => ctx.E = ctx.srl(ctx.E) } private val SRL_H = Opcode((0xCB,0x3C),8,2,"SRL H") { ctx => ctx.H = ctx.srl(ctx.H) } private val SRL_L = Opcode((0xCB,0x3D),8,2,"SRL L") { ctx => ctx.L = ctx.srl(ctx.L) } // *** SRL (HL) // ************** private val SRL_$HL$ = Opcode((0xCB,0x3E),15,2,"SRL (HL)") { ctx => val adr = ctx.HL val tmp = ctx.read(ctx.HL) ctx.io.internalOperation(1,adr) ctx.write(adr,ctx.srl(tmp)) } // *** SRL (IX+d) // ************** private val SRL_$IX_d$ = Opcode((0xDD,0xCB,0x3E),23,4,MNEMONIC_IXY_d("SRL (IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) val tmp = ctx.read(adr) ctx.io.internalOperation(1,adr) ctx.write(adr,ctx.srl(tmp)) } // *** RLCA // ************** private val RLCA = Opcode(0x07,4,1,"RLCA") { ctx => ctx.rlca() } // *** RRCA // ************** private val RRCA = Opcode(0x0F,4,1,"RRCA") { ctx => ctx.rrca() } // *** RLA // ************** private val RLA = Opcode(0x17,4,1,"RLA") { ctx => ctx.rla() } // *** RRA // ************** private val RRA = Opcode(0x1F,4,1,"RRA") { ctx => ctx.rra() } // *** RLD // ************** private val RLD = Opcode((0xED,0x6F),18,2,"RLD") { ctx => ctx.rld } // *** RRD // ************** private val RRD = Opcode((0xED,0x67),18,2,"RRD") { ctx => ctx.rrd } // ==================================== Bit manipulation =================================================== // *** BIT b,r // ************** private val BIT_0_A = Opcode((0xCB,0x47),8,2,"BIT 0,A") { ctx => ctx.bit(0, ctx.A) } private val BIT_0_B = Opcode((0xCB,0x40),8,2,"BIT 0,B") { ctx => ctx.bit(0, ctx.B) } private val BIT_0_C = Opcode((0xCB,0x41),8,2,"BIT 0,C") { ctx => ctx.bit(0, ctx.C) } private val BIT_0_D = Opcode((0xCB,0x42),8,2,"BIT 0,D") { ctx => ctx.bit(0, ctx.D) } private val BIT_0_E = Opcode((0xCB,0x43),8,2,"BIT 0,E") { ctx => ctx.bit(0, ctx.E) } private val BIT_0_H = Opcode((0xCB,0x44),8,2,"BIT 0,H") { ctx => ctx.bit(0, ctx.H) } private val BIT_0_L = Opcode((0xCB,0x45),8,2,"BIT 0,L") { ctx => ctx.bit(0, ctx.L) } private val BIT_1_A = Opcode((0xCB,0x4F),8,2,"BIT 1,A") { ctx => ctx.bit(1, ctx.A) } private val BIT_1_B = Opcode((0xCB,0x48),8,2,"BIT 1,B") { ctx => ctx.bit(1, ctx.B) } private val BIT_1_C = Opcode((0xCB,0x49),8,2,"BIT 1,C") { ctx => ctx.bit(1, ctx.C) } private val BIT_1_D = Opcode((0xCB,0x4A),8,2,"BIT 1,D") { ctx => ctx.bit(1, ctx.D) } private val BIT_1_E = Opcode((0xCB,0x4B),8,2,"BIT 1,E") { ctx => ctx.bit(1, ctx.E) } private val BIT_1_H = Opcode((0xCB,0x4C),8,2,"BIT 1,H") { ctx => ctx.bit(1, ctx.H) } private val BIT_1_L = Opcode((0xCB,0x4D),8,2,"BIT 1,L") { ctx => ctx.bit(1, ctx.L) } private val BIT_2_A = Opcode((0xCB,0x57),8,2,"BIT 2,A") { ctx => ctx.bit(2, ctx.A) } private val BIT_2_B = Opcode((0xCB,0x50),8,2,"BIT 2,B") { ctx => ctx.bit(2, ctx.B) } private val BIT_2_C = Opcode((0xCB,0x51),8,2,"BIT 2,C") { ctx => ctx.bit(2, ctx.C) } private val BIT_2_D = Opcode((0xCB,0x52),8,2,"BIT 2,D") { ctx => ctx.bit(2, ctx.D) } private val BIT_2_E = Opcode((0xCB,0x53),8,2,"BIT 2,E") { ctx => ctx.bit(2, ctx.E) } private val BIT_2_H = Opcode((0xCB,0x54),8,2,"BIT 2,H") { ctx => ctx.bit(2, ctx.H) } private val BIT_2_L = Opcode((0xCB,0x55),8,2,"BIT 2,L") { ctx => ctx.bit(2, ctx.L) } private val BIT_3_A = Opcode((0xCB,0x5F),8,2,"BIT 3,A") { ctx => 
ctx.bit(3, ctx.A) } private val BIT_3_B = Opcode((0xCB,0x58),8,2,"BIT 3,B") { ctx => ctx.bit(3, ctx.B) } private val BIT_3_C = Opcode((0xCB,0x59),8,2,"BIT 3,C") { ctx => ctx.bit(3, ctx.C) } private val BIT_3_D = Opcode((0xCB,0x5A),8,2,"BIT 3,D") { ctx => ctx.bit(3, ctx.D) } private val BIT_3_E = Opcode((0xCB,0x5B),8,2,"BIT 3,E") { ctx => ctx.bit(3, ctx.E) } private val BIT_3_H = Opcode((0xCB,0x5C),8,2,"BIT 3,H") { ctx => ctx.bit(3, ctx.H) } private val BIT_3_L = Opcode((0xCB,0x5D),8,2,"BIT 3,L") { ctx => ctx.bit(3, ctx.L) } private val BIT_4_A = Opcode((0xCB,0x67),8,2,"BIT 4,A") { ctx => ctx.bit(4, ctx.A) } private val BIT_4_B = Opcode((0xCB,0x60),8,2,"BIT 4,B") { ctx => ctx.bit(4, ctx.B) } private val BIT_4_C = Opcode((0xCB,0x61),8,2,"BIT 4,C") { ctx => ctx.bit(4, ctx.C) } private val BIT_4_D = Opcode((0xCB,0x62),8,2,"BIT 4,D") { ctx => ctx.bit(4, ctx.D) } private val BIT_4_E = Opcode((0xCB,0x63),8,2,"BIT 4,E") { ctx => ctx.bit(4, ctx.E) } private val BIT_4_H = Opcode((0xCB,0x64),8,2,"BIT 4,H") { ctx => ctx.bit(4, ctx.H) } private val BIT_4_L = Opcode((0xCB,0x65),8,2,"BIT 4,L") { ctx => ctx.bit(4, ctx.L) } private val BIT_5_A = Opcode((0xCB,0x6F),8,2,"BIT 5,A") { ctx => ctx.bit(5, ctx.A) } private val BIT_5_B = Opcode((0xCB,0x68),8,2,"BIT 5,B") { ctx => ctx.bit(5, ctx.B) } private val BIT_5_C = Opcode((0xCB,0x69),8,2,"BIT 5,C") { ctx => ctx.bit(5, ctx.C) } private val BIT_5_D = Opcode((0xCB,0x6A),8,2,"BIT 5,D") { ctx => ctx.bit(5, ctx.D) } private val BIT_5_E = Opcode((0xCB,0x6B),8,2,"BIT 5,E") { ctx => ctx.bit(5, ctx.E) } private val BIT_5_H = Opcode((0xCB,0x6C),8,2,"BIT 5,H") { ctx => ctx.bit(5, ctx.H) } private val BIT_5_L = Opcode((0xCB,0x6D),8,2,"BIT 5,L") { ctx => ctx.bit(5, ctx.L) } private val BIT_6_A = Opcode((0xCB,0x77),8,2,"BIT 6,A") { ctx => ctx.bit(6, ctx.A) } private val BIT_6_B = Opcode((0xCB,0x70),8,2,"BIT 6,B") { ctx => ctx.bit(6, ctx.B) } private val BIT_6_C = Opcode((0xCB,0x71),8,2,"BIT 6,C") { ctx => ctx.bit(6, ctx.C) } private val BIT_6_D = Opcode((0xCB,0x72),8,2,"BIT 6,D") { ctx => ctx.bit(6, ctx.D) } private val BIT_6_E = Opcode((0xCB,0x73),8,2,"BIT 6,E") { ctx => ctx.bit(6, ctx.E) } private val BIT_6_H = Opcode((0xCB,0x74),8,2,"BIT 6,H") { ctx => ctx.bit(6, ctx.H) } private val BIT_6_L = Opcode((0xCB,0x75),8,2,"BIT 6,L") { ctx => ctx.bit(6, ctx.L) } private val BIT_7_A = Opcode((0xCB,0x7F),8,2,"BIT 7,A") { ctx => ctx.bit(7, ctx.A) } private val BIT_7_B = Opcode((0xCB,0x78),8,2,"BIT 7,B") { ctx => ctx.bit(7, ctx.B) } private val BIT_7_C = Opcode((0xCB,0x79),8,2,"BIT 7,C") { ctx => ctx.bit(7, ctx.C) } private val BIT_7_D = Opcode((0xCB,0x7A),8,2,"BIT 7,D") { ctx => ctx.bit(7, ctx.D) } private val BIT_7_E = Opcode((0xCB,0x7B),8,2,"BIT 7,E") { ctx => ctx.bit(7, ctx.E) } private val BIT_7_H = Opcode((0xCB,0x7C),8,2,"BIT 7,H") { ctx => ctx.bit(7, ctx.H) } private val BIT_7_L = Opcode((0xCB,0x7D),8,2,"BIT 7,L") { ctx => ctx.bit(7, ctx.L) } // *** BIT b,(HL) // ************** private val BIT_0_$HL$ = Opcode((0xCB,0x46),12,2,"BIT 0,(HL)") { ctx => ctx.bit(0, ctx.read(ctx.HL), 2) ; ctx.io.internalOperation(1,ctx.HL) } private val BIT_1_$HL$ = Opcode((0xCB,0x4E),12,2,"BIT 1,(HL)") { ctx => ctx.bit(1, ctx.read(ctx.HL), 2) ; ctx.io.internalOperation(1,ctx.HL) } private val BIT_2_$HL$ = Opcode((0xCB,0x56),12,2,"BIT 2,(HL)") { ctx => ctx.bit(2, ctx.read(ctx.HL), 2) ; ctx.io.internalOperation(1,ctx.HL) } private val BIT_3_$HL$ = Opcode((0xCB,0x5E),12,2,"BIT 3,(HL)") { ctx => ctx.bit(3, ctx.read(ctx.HL), 2) ; ctx.io.internalOperation(1,ctx.HL) } private val BIT_4_$HL$ = 
Opcode((0xCB,0x66),12,2,"BIT 4,(HL)") { ctx => ctx.bit(4, ctx.read(ctx.HL), 2) ; ctx.io.internalOperation(1,ctx.HL) } private val BIT_5_$HL$ = Opcode((0xCB,0x6E),12,2,"BIT 5,(HL)") { ctx => ctx.bit(5, ctx.read(ctx.HL), 2) ; ctx.io.internalOperation(1,ctx.HL) } private val BIT_6_$HL$ = Opcode((0xCB,0x76),12,2,"BIT 6,(HL)") { ctx => ctx.bit(6, ctx.read(ctx.HL), 2) ; ctx.io.internalOperation(1,ctx.HL) } private val BIT_7_$HL$ = Opcode((0xCB,0x7E),12,2,"BIT 7,(HL)") { ctx => ctx.bit(7, ctx.read(ctx.HL), 2) ; ctx.io.internalOperation(1,ctx.HL) } // *** BIT b,(IX + d) // ************** private val BIT_0_$IX_d$ = Opcode((0xDD,0xCB,0x46),20,4,MNEMONIC_IXY_d("BIT 0,(IX%s)")) { ctx => val addr = ctx.INDEX_+(ctx.byte(2)) ctx.bit(0, ctx.read(addr,1), 1) } private val BIT_1_$IX_d$ = Opcode((0xDD,0xCB,0x4E),20,4,MNEMONIC_IXY_d("BIT 1,(IX%s)")) { ctx => val addr = ctx.INDEX_+(ctx.byte(2)) ctx.bit(1, ctx.read(addr,1), 1) } private val BIT_2_$IX_d$ = Opcode((0xDD,0xCB,0x56),20,4,MNEMONIC_IXY_d("BIT 2,(IX%s)")) { ctx => val addr = ctx.INDEX_+(ctx.byte(2)) ctx.bit(2, ctx.read(addr,1), 1) } private val BIT_3_$IX_d$ = Opcode((0xDD,0xCB,0x5E),20,4,MNEMONIC_IXY_d("BIT 3,(IX%s)")) { ctx => val addr = ctx.INDEX_+(ctx.byte(2)) ctx.bit(3, ctx.read(addr,1), 1) } private val BIT_4_$IX_d$ = Opcode((0xDD,0xCB,0x66),20,4,MNEMONIC_IXY_d("BIT 4,(IX%s)")) { ctx => val addr = ctx.INDEX_+(ctx.byte(2)) ctx.bit(4, ctx.read(addr,1), 1) } private val BIT_5_$IX_d$ = Opcode((0xDD,0xCB,0x6E),20,4,MNEMONIC_IXY_d("BIT 5,(IX%s)")) { ctx => val addr = ctx.INDEX_+(ctx.byte(2)) ctx.bit(5, ctx.read(addr,1), 1) } private val BIT_6_$IX_d$ = Opcode((0xDD,0xCB,0x76),20,4,MNEMONIC_IXY_d("BIT 6,(IX%s)")) { ctx => val addr = ctx.INDEX_+(ctx.byte(2)) ctx.bit(6, ctx.read(addr,1), 1) } private val BIT_7_$IX_d$ = Opcode((0xDD,0xCB,0x7E),20,4,MNEMONIC_IXY_d("BIT 7,(IX%s)")) { ctx => val addr = ctx.INDEX_+(ctx.byte(2)) ctx.bit(7, ctx.read(addr,1), 1) } // *** RES b,r // ************** private val RES_0_A = Opcode((0xCB,0x87),8,2,"RES 0,A") { ctx => ctx.A = ctx.res(0,ctx.A) } private val RES_0_B = Opcode((0xCB,0x80),8,2,"RES 0,B") { ctx => ctx.B = ctx.res(0,ctx.B) } private val RES_0_C = Opcode((0xCB,0x81),8,2,"RES 0,C") { ctx => ctx.C = ctx.res(0,ctx.C) } private val RES_0_D = Opcode((0xCB,0x82),8,2,"RES 0,D") { ctx => ctx.D = ctx.res(0,ctx.D) } private val RES_0_E = Opcode((0xCB,0x83),8,2,"RES 0,E") { ctx => ctx.E = ctx.res(0,ctx.E) } private val RES_0_H = Opcode((0xCB,0x84),8,2,"RES 0,H") { ctx => ctx.H = ctx.res(0,ctx.H) } private val RES_0_L = Opcode((0xCB,0x85),8,2,"RES 0,L") { ctx => ctx.L = ctx.res(0,ctx.L) } private val RES_1_A = Opcode((0xCB,0x8F),8,2,"RES 1,A") { ctx => ctx.A = ctx.res(1,ctx.A) } private val RES_1_B = Opcode((0xCB,0x88),8,2,"RES 1,B") { ctx => ctx.B = ctx.res(1,ctx.B) } private val RES_1_C = Opcode((0xCB,0x89),8,2,"RES 1,C") { ctx => ctx.C = ctx.res(1,ctx.C) } private val RES_1_D = Opcode((0xCB,0x8A),8,2,"RES 1,D") { ctx => ctx.D = ctx.res(1,ctx.D) } private val RES_1_E = Opcode((0xCB,0x8B),8,2,"RES 1,E") { ctx => ctx.E = ctx.res(1,ctx.E) } private val RES_1_H = Opcode((0xCB,0x8C),8,2,"RES 1,H") { ctx => ctx.H = ctx.res(1,ctx.H) } private val RES_1_L = Opcode((0xCB,0x8D),8,2,"RES 1,L") { ctx => ctx.L = ctx.res(1,ctx.L) } private val RES_2_A = Opcode((0xCB,0x97),8,2,"RES 2,A") { ctx => ctx.A = ctx.res(2,ctx.A) } private val RES_2_B = Opcode((0xCB,0x90),8,2,"RES 2,B") { ctx => ctx.B = ctx.res(2,ctx.B) } private val RES_2_C = Opcode((0xCB,0x91),8,2,"RES 2,C") { ctx => ctx.C = ctx.res(2,ctx.C) } private val RES_2_D = 
Opcode((0xCB,0x92),8,2,"RES 2,D") { ctx => ctx.D = ctx.res(2,ctx.D) } private val RES_2_E = Opcode((0xCB,0x93),8,2,"RES 2,E") { ctx => ctx.E = ctx.res(2,ctx.E) } private val RES_2_H = Opcode((0xCB,0x94),8,2,"RES 2,H") { ctx => ctx.H = ctx.res(2,ctx.H) } private val RES_2_L = Opcode((0xCB,0x95),8,2,"RES 2,L") { ctx => ctx.L = ctx.res(2,ctx.L) } private val RES_3_A = Opcode((0xCB,0x9F),8,2,"RES 3,A") { ctx => ctx.A = ctx.res(3,ctx.A) } private val RES_3_B = Opcode((0xCB,0x98),8,2,"RES 3,B") { ctx => ctx.B = ctx.res(3,ctx.B) } private val RES_3_C = Opcode((0xCB,0x99),8,2,"RES 3,C") { ctx => ctx.C = ctx.res(3,ctx.C) } private val RES_3_D = Opcode((0xCB,0x9A),8,2,"RES 3,D") { ctx => ctx.D = ctx.res(3,ctx.D) } private val RES_3_E = Opcode((0xCB,0x9B),8,2,"RES 3,E") { ctx => ctx.E = ctx.res(3,ctx.E) } private val RES_3_H = Opcode((0xCB,0x9C),8,2,"RES 3,H") { ctx => ctx.H = ctx.res(3,ctx.H) } private val RES_3_L = Opcode((0xCB,0x9D),8,2,"RES 3,L") { ctx => ctx.L = ctx.res(3,ctx.L) } private val RES_4_A = Opcode((0xCB,0xA7),8,2,"RES 4,A") { ctx => ctx.A = ctx.res(4,ctx.A) } private val RES_4_B = Opcode((0xCB,0xA0),8,2,"RES 4,B") { ctx => ctx.B = ctx.res(4,ctx.B) } private val RES_4_C = Opcode((0xCB,0xA1),8,2,"RES 4,C") { ctx => ctx.C = ctx.res(4,ctx.C) } private val RES_4_D = Opcode((0xCB,0xA2),8,2,"RES 4,D") { ctx => ctx.D = ctx.res(4,ctx.D) } private val RES_4_E = Opcode((0xCB,0xA3),8,2,"RES 4,E") { ctx => ctx.E = ctx.res(4,ctx.E) } private val RES_4_H = Opcode((0xCB,0xA4),8,2,"RES 4,H") { ctx => ctx.H = ctx.res(4,ctx.H) } private val RES_4_L = Opcode((0xCB,0xA5),8,2,"RES 4,L") { ctx => ctx.L = ctx.res(4,ctx.L) } private val RES_5_A = Opcode((0xCB,0xAF),8,2,"RES 5,A") { ctx => ctx.A = ctx.res(5,ctx.A) } private val RES_5_B = Opcode((0xCB,0xA8),8,2,"RES 5,B") { ctx => ctx.B = ctx.res(5,ctx.B) } private val RES_5_C = Opcode((0xCB,0xA9),8,2,"RES 5,C") { ctx => ctx.C = ctx.res(5,ctx.C) } private val RES_5_D = Opcode((0xCB,0xAA),8,2,"RES 5,D") { ctx => ctx.D = ctx.res(5,ctx.D) } private val RES_5_E = Opcode((0xCB,0xAB),8,2,"RES 5,E") { ctx => ctx.E = ctx.res(5,ctx.E) } private val RES_5_H = Opcode((0xCB,0xAC),8,2,"RES 5,H") { ctx => ctx.H = ctx.res(5,ctx.H) } private val RES_5_L = Opcode((0xCB,0xAD),8,2,"RES 5,L") { ctx => ctx.L = ctx.res(5,ctx.L) } private val RES_6_A = Opcode((0xCB,0xB7),8,2,"RES 6,A") { ctx => ctx.A = ctx.res(6,ctx.A) } private val RES_6_B = Opcode((0xCB,0xB0),8,2,"RES 6,B") { ctx => ctx.B = ctx.res(6,ctx.B) } private val RES_6_C = Opcode((0xCB,0xB1),8,2,"RES 6,C") { ctx => ctx.C = ctx.res(6,ctx.C) } private val RES_6_D = Opcode((0xCB,0xB2),8,2,"RES 6,D") { ctx => ctx.D = ctx.res(6,ctx.D) } private val RES_6_E = Opcode((0xCB,0xB3),8,2,"RES 6,E") { ctx => ctx.E = ctx.res(6,ctx.E) } private val RES_6_H = Opcode((0xCB,0xB4),8,2,"RES 6,H") { ctx => ctx.H = ctx.res(6,ctx.H) } private val RES_6_L = Opcode((0xCB,0xB5),8,2,"RES 6,L") { ctx => ctx.L = ctx.res(6,ctx.L) } private val RES_7_A = Opcode((0xCB,0xBF),8,2,"RES 7,A") { ctx => ctx.A = ctx.res(7,ctx.A) } private val RES_7_B = Opcode((0xCB,0xB8),8,2,"RES 7,B") { ctx => ctx.B = ctx.res(7,ctx.B) } private val RES_7_C = Opcode((0xCB,0xB9),8,2,"RES 7,C") { ctx => ctx.C = ctx.res(7,ctx.C) } private val RES_7_D = Opcode((0xCB,0xBA),8,2,"RES 7,D") { ctx => ctx.D = ctx.res(7,ctx.D) } private val RES_7_E = Opcode((0xCB,0xBB),8,2,"RES 7,E") { ctx => ctx.E = ctx.res(7,ctx.E) } private val RES_7_H = Opcode((0xCB,0xBC),8,2,"RES 7,H") { ctx => ctx.H = ctx.res(7,ctx.H) } private val RES_7_L = Opcode((0xCB,0xBD),8,2,"RES 7,L") { ctx => ctx.L = 
ctx.res(7,ctx.L) } // *** RES b,(HL) // ************** private val RES_0_$HL$ = Opcode((0xCB,0x86),15,2,"RES 0,(HL)") { ctx => ctx.write(ctx.HL,ctx.res(0,ctx.read(ctx.HL,1))) } private val RES_1_$HL$ = Opcode((0xCB,0x8E),15,2,"RES 1,(HL)") { ctx => ctx.write(ctx.HL,ctx.res(1,ctx.read(ctx.HL,1))) } private val RES_2_$HL$ = Opcode((0xCB,0x96),15,2,"RES 2,(HL)") { ctx => ctx.write(ctx.HL,ctx.res(2,ctx.read(ctx.HL,1))) } private val RES_3_$HL$ = Opcode((0xCB,0x9E),15,2,"RES 3,(HL)") { ctx => ctx.write(ctx.HL,ctx.res(3,ctx.read(ctx.HL,1))) } private val RES_4_$HL$ = Opcode((0xCB,0xA6),15,2,"RES 4,(HL)") { ctx => ctx.write(ctx.HL,ctx.res(4,ctx.read(ctx.HL,1))) } private val RES_5_$HL$ = Opcode((0xCB,0xAE),15,2,"RES 5,(HL)") { ctx => ctx.write(ctx.HL,ctx.res(5,ctx.read(ctx.HL,1))) } private val RES_6_$HL$ = Opcode((0xCB,0xB6),15,2,"RES 6,(HL)") { ctx => ctx.write(ctx.HL,ctx.res(6,ctx.read(ctx.HL,1))) } private val RES_7_$HL$ = Opcode((0xCB,0xBE),15,2,"RES 7,(HL)") { ctx => ctx.write(ctx.HL,ctx.res(7,ctx.read(ctx.HL,1))) } // *** RES b,(IX + d) // ************** private val RES_0_$IX_d$ = Opcode((0xDD,0xCB,0x86),23,4,MNEMONIC_IXY_d("RES 0,(IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) ; ctx.write(adr,ctx.res(0,ctx.read(adr,1))) } private val RES_1_$IX_d$ = Opcode((0xDD,0xCB,0x8E),23,4,MNEMONIC_IXY_d("RES 1,(IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) ; ctx.write(adr,ctx.res(1,ctx.read(adr,1))) } private val RES_2_$IX_d$ = Opcode((0xDD,0xCB,0x96),23,4,MNEMONIC_IXY_d("RES 2,(IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) ; ctx.write(adr,ctx.res(2,ctx.read(adr,1))) } private val RES_3_$IX_d$ = Opcode((0xDD,0xCB,0x9E),23,4,MNEMONIC_IXY_d("RES 3,(IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) ; ctx.write(adr,ctx.res(3,ctx.read(adr,1))) } private val RES_4_$IX_d$ = Opcode((0xDD,0xCB,0xA6),23,4,MNEMONIC_IXY_d("RES 4,(IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) ; ctx.write(adr,ctx.res(4,ctx.read(adr,1))) } private val RES_5_$IX_d$ = Opcode((0xDD,0xCB,0xAE),23,4,MNEMONIC_IXY_d("RES 5,(IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) ; ctx.write(adr,ctx.res(5,ctx.read(adr,1))) } private val RES_6_$IX_d$ = Opcode((0xDD,0xCB,0xB6),23,4,MNEMONIC_IXY_d("RES 6,(IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) ; ctx.write(adr,ctx.res(6,ctx.read(adr,1))) } private val RES_7_$IX_d$ = Opcode((0xDD,0xCB,0xBE),23,4,MNEMONIC_IXY_d("RES 7,(IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) ; ctx.write(adr,ctx.res(7,ctx.read(adr,1))) } // *** SET b,r // ************** private val SET_0_A = Opcode((0xCB,0xC7),8,2,"SET 0,A") { ctx => ctx.A = ctx.set(0,ctx.A) } private val SET_0_B = Opcode((0xCB,0xC0),8,2,"SET 0,B") { ctx => ctx.B = ctx.set(0,ctx.B) } private val SET_0_C = Opcode((0xCB,0xC1),8,2,"SET 0,C") { ctx => ctx.C = ctx.set(0,ctx.C) } private val SET_0_D = Opcode((0xCB,0xC2),8,2,"SET 0,D") { ctx => ctx.D = ctx.set(0,ctx.D) } private val SET_0_E = Opcode((0xCB,0xC3),8,2,"SET 0,E") { ctx => ctx.E = ctx.set(0,ctx.E) } private val SET_0_H = Opcode((0xCB,0xC4),8,2,"SET 0,H") { ctx => ctx.H = ctx.set(0,ctx.H) } private val SET_0_L = Opcode((0xCB,0xC5),8,2,"SET 0,L") { ctx => ctx.L = ctx.set(0,ctx.L) } private val SET_1_A = Opcode((0xCB,0xCF),8,2,"SET 1,A") { ctx => ctx.A = ctx.set(1,ctx.A) } private val SET_1_B = Opcode((0xCB,0xC8),8,2,"SET 1,B") { ctx => ctx.B = ctx.set(1,ctx.B) } private val SET_1_C = Opcode((0xCB,0xC9),8,2,"SET 1,C") { ctx => ctx.C = ctx.set(1,ctx.C) } private val SET_1_D = Opcode((0xCB,0xCA),8,2,"SET 1,D") { ctx => ctx.D = ctx.set(1,ctx.D) } private val 
SET_1_E = Opcode((0xCB,0xCB),8,2,"SET 1,E") { ctx => ctx.E = ctx.set(1,ctx.E) } private val SET_1_H = Opcode((0xCB,0xCC),8,2,"SET 1,H") { ctx => ctx.H = ctx.set(1,ctx.H) } private val SET_1_L = Opcode((0xCB,0xCD),8,2,"SET 1,L") { ctx => ctx.L = ctx.set(1,ctx.L) } private val SET_2_A = Opcode((0xCB,0xD7),8,2,"SET 2,A") { ctx => ctx.A = ctx.set(2,ctx.A) } private val SET_2_B = Opcode((0xCB,0xD0),8,2,"SET 2,B") { ctx => ctx.B = ctx.set(2,ctx.B) } private val SET_2_C = Opcode((0xCB,0xD1),8,2,"SET 2,C") { ctx => ctx.C = ctx.set(2,ctx.C) } private val SET_2_D = Opcode((0xCB,0xD2),8,2,"SET 2,D") { ctx => ctx.D = ctx.set(2,ctx.D) } private val SET_2_E = Opcode((0xCB,0xD3),8,2,"SET 2,E") { ctx => ctx.E = ctx.set(2,ctx.E) } private val SET_2_H = Opcode((0xCB,0xD4),8,2,"SET 2,H") { ctx => ctx.H = ctx.set(2,ctx.H) } private val SET_2_L = Opcode((0xCB,0xD5),8,2,"SET 2,L") { ctx => ctx.L = ctx.set(2,ctx.L) } private val SET_3_A = Opcode((0xCB,0xDF),8,2,"SET 3,A") { ctx => ctx.A = ctx.set(3,ctx.A) } private val SET_3_B = Opcode((0xCB,0xD8),8,2,"SET 3,B") { ctx => ctx.B = ctx.set(3,ctx.B) } private val SET_3_C = Opcode((0xCB,0xD9),8,2,"SET 3,C") { ctx => ctx.C = ctx.set(3,ctx.C) } private val SET_3_D = Opcode((0xCB,0xDA),8,2,"SET 3,D") { ctx => ctx.D = ctx.set(3,ctx.D) } private val SET_3_E = Opcode((0xCB,0xDB),8,2,"SET 3,E") { ctx => ctx.E = ctx.set(3,ctx.E) } private val SET_3_H = Opcode((0xCB,0xDC),8,2,"SET 3,H") { ctx => ctx.H = ctx.set(3,ctx.H) } private val SET_3_L = Opcode((0xCB,0xDD),8,2,"SET 3,L") { ctx => ctx.L = ctx.set(3,ctx.L) } private val SET_4_A = Opcode((0xCB,0xE7),8,2,"SET 4,A") { ctx => ctx.A = ctx.set(4,ctx.A) } private val SET_4_B = Opcode((0xCB,0xE0),8,2,"SET 4,B") { ctx => ctx.B = ctx.set(4,ctx.B) } private val SET_4_C = Opcode((0xCB,0xE1),8,2,"SET 4,C") { ctx => ctx.C = ctx.set(4,ctx.C) } private val SET_4_D = Opcode((0xCB,0xE2),8,2,"SET 4,D") { ctx => ctx.D = ctx.set(4,ctx.D) } private val SET_4_E = Opcode((0xCB,0xE3),8,2,"SET 4,E") { ctx => ctx.E = ctx.set(4,ctx.E) } private val SET_4_H = Opcode((0xCB,0xE4),8,2,"SET 4,H") { ctx => ctx.H = ctx.set(4,ctx.H) } private val SET_4_L = Opcode((0xCB,0xE5),8,2,"SET 4,L") { ctx => ctx.L = ctx.set(4,ctx.L) } private val SET_5_A = Opcode((0xCB,0xEF),8,2,"SET 5,A") { ctx => ctx.A = ctx.set(5,ctx.A) } private val SET_5_B = Opcode((0xCB,0xE8),8,2,"SET 5,B") { ctx => ctx.B = ctx.set(5,ctx.B) } private val SET_5_C = Opcode((0xCB,0xE9),8,2,"SET 5,C") { ctx => ctx.C = ctx.set(5,ctx.C) } private val SET_5_D = Opcode((0xCB,0xEA),8,2,"SET 5,D") { ctx => ctx.D = ctx.set(5,ctx.D) } private val SET_5_E = Opcode((0xCB,0xEB),8,2,"SET 5,E") { ctx => ctx.E = ctx.set(5,ctx.E) } private val SET_5_H = Opcode((0xCB,0xEC),8,2,"SET 5,H") { ctx => ctx.H = ctx.set(5,ctx.H) } private val SET_5_L = Opcode((0xCB,0xED),8,2,"SET 5,L") { ctx => ctx.L = ctx.set(5,ctx.L) } private val SET_6_A = Opcode((0xCB,0xF7),8,2,"SET 6,A") { ctx => ctx.A = ctx.set(6,ctx.A) } private val SET_6_B = Opcode((0xCB,0xF0),8,2,"SET 6,B") { ctx => ctx.B = ctx.set(6,ctx.B) } private val SET_6_C = Opcode((0xCB,0xF1),8,2,"SET 6,C") { ctx => ctx.C = ctx.set(6,ctx.C) } private val SET_6_D = Opcode((0xCB,0xF2),8,2,"SET 6,D") { ctx => ctx.D = ctx.set(6,ctx.D) } private val SET_6_E = Opcode((0xCB,0xF3),8,2,"SET 6,E") { ctx => ctx.E = ctx.set(6,ctx.E) } private val SET_6_H = Opcode((0xCB,0xF4),8,2,"SET 6,H") { ctx => ctx.H = ctx.set(6,ctx.H) } private val SET_6_L = Opcode((0xCB,0xF5),8,2,"SET 6,L") { ctx => ctx.L = ctx.set(6,ctx.L) } private val SET_7_A = Opcode((0xCB,0xFF),8,2,"SET 7,A") { ctx => ctx.A 
= ctx.set(7,ctx.A) } private val SET_7_B = Opcode((0xCB,0xF8),8,2,"SET 7,B") { ctx => ctx.B = ctx.set(7,ctx.B) } private val SET_7_C = Opcode((0xCB,0xF9),8,2,"SET 7,C") { ctx => ctx.C = ctx.set(7,ctx.C) } private val SET_7_D = Opcode((0xCB,0xFA),8,2,"SET 7,D") { ctx => ctx.D = ctx.set(7,ctx.D) } private val SET_7_E = Opcode((0xCB,0xFB),8,2,"SET 7,E") { ctx => ctx.E = ctx.set(7,ctx.E) } private val SET_7_H = Opcode((0xCB,0xFC),8,2,"SET 7,H") { ctx => ctx.H = ctx.set(7,ctx.H) } private val SET_7_L = Opcode((0xCB,0xFD),8,2,"SET 7,L") { ctx => ctx.L = ctx.set(7,ctx.L) } // *** SET b,(HL) // ************** private val SET_0_$HL$ = Opcode((0xCB,0xC6),15,2,"SET 0,(HL)") { ctx => val tmp = ctx.read(ctx.HL) ctx.io.internalOperation(1,ctx.HL) ctx.write(ctx.HL,ctx.set(0,tmp)) } private val SET_1_$HL$ = Opcode((0xCB,0xCE),15,2,"SET 1,(HL)") { ctx => val tmp = ctx.read(ctx.HL) ctx.io.internalOperation(1,ctx.HL) ctx.write(ctx.HL,ctx.set(1,tmp)) } private val SET_2_$HL$ = Opcode((0xCB,0xD6),15,2,"SET 2,(HL)") { ctx => val tmp = ctx.read(ctx.HL) ctx.io.internalOperation(1,ctx.HL) ctx.write(ctx.HL,ctx.set(2,tmp)) } private val SET_3_$HL$ = Opcode((0xCB,0xDE),15,2,"SET 3,(HL)") { ctx => val tmp = ctx.read(ctx.HL) ctx.io.internalOperation(1,ctx.HL) ctx.write(ctx.HL,ctx.set(3,tmp)) } private val SET_4_$HL$ = Opcode((0xCB,0xE6),15,2,"SET 4,(HL)") { ctx => val tmp = ctx.read(ctx.HL) ctx.io.internalOperation(1,ctx.HL) ctx.write(ctx.HL,ctx.set(4,tmp)) } private val SET_5_$HL$ = Opcode((0xCB,0xEE),15,2,"SET 5,(HL)") { ctx => val tmp = ctx.read(ctx.HL) ctx.io.internalOperation(1,ctx.HL) ctx.write(ctx.HL,ctx.set(5,tmp)) } private val SET_6_$HL$ = Opcode((0xCB,0xF6),15,2,"SET 6,(HL)") { ctx => val tmp = ctx.read(ctx.HL) ctx.io.internalOperation(1,ctx.HL) ctx.write(ctx.HL,ctx.set(6,tmp)) } private val SET_7_$HL$ = Opcode((0xCB,0xFE),15,2,"SET 7,(HL)") { ctx => val tmp = ctx.read(ctx.HL) ctx.io.internalOperation(1,ctx.HL) ctx.write(ctx.HL,ctx.set(7,tmp)) } // *** SET b,(IX + d) // ************** private val SET_0_$IX_d$ = Opcode((0xDD,0xCB,0xC6),23,4,MNEMONIC_IXY_d("SET 0,(IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) ; ctx.write(adr,ctx.set(0,ctx.read(adr,1))) } private val SET_1_$IX_d$ = Opcode((0xDD,0xCB,0xCE),23,4,MNEMONIC_IXY_d("SET 1,(IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) ; ctx.write(adr,ctx.set(1,ctx.read(adr,1))) } private val SET_2_$IX_d$ = Opcode((0xDD,0xCB,0xD6),23,4,MNEMONIC_IXY_d("SET 2,(IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) ; ctx.write(adr,ctx.set(2,ctx.read(adr,1))) } private val SET_3_$IX_d$ = Opcode((0xDD,0xCB,0xDE),23,4,MNEMONIC_IXY_d("SET 3,(IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) ; ctx.write(adr,ctx.set(3,ctx.read(adr,1))) } private val SET_4_$IX_d$ = Opcode((0xDD,0xCB,0xE6),23,4,MNEMONIC_IXY_d("SET 4,(IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) ; ctx.write(adr,ctx.set(4,ctx.read(adr,1))) } private val SET_5_$IX_d$ = Opcode((0xDD,0xCB,0xEE),23,4,MNEMONIC_IXY_d("SET 5,(IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) ; ctx.write(adr,ctx.set(5,ctx.read(adr,1))) } private val SET_6_$IX_d$ = Opcode((0xDD,0xCB,0xF6),23,4,MNEMONIC_IXY_d("SET 6,(IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) ; ctx.write(adr,ctx.set(6,ctx.read(adr,1))) } private val SET_7_$IX_d$ = Opcode((0xDD,0xCB,0xFE),23,4,MNEMONIC_IXY_d("SET 7,(IX%s)")) { ctx => val adr = ctx.INDEX_+(ctx.byte(2)) ; ctx.write(adr,ctx.set(7,ctx.read(adr,1))) } // ========================================= Jump Call and Return Group ==================================== // *** JP nn // 
************** private val JP_nn = Opcode(0xC3,10,3,MNEMONIC_nn("JP %s"),modifyPC = true) { ctx => val addr = ctx.word(1) ctx.PC = addr ctx.memptr = addr } // *** JP cc,nn // ************** private val JP_C_nn = Opcode(0xDA,10,3,MNEMONIC_nn("JP C,%s"),modifyPC = true) { ctx => ctx.jp_cond_nn(ctx.carry > 0) } private val JP_NC_nn = Opcode(0xD2,10,3,MNEMONIC_nn("JP NC,%s"),modifyPC = true) { ctx => ctx.jp_cond_nn(ctx.carry == 0) } private val JP_Z_nn = Opcode(0xCA,10,3,MNEMONIC_nn("JP Z,%s"),modifyPC = true) { ctx => ctx.jp_cond_nn(ctx.zero > 0) } private val JP_NZ_nn = Opcode(0xC2,10,3,MNEMONIC_nn("JP NZ,%s"),modifyPC = true) { ctx => ctx.jp_cond_nn(ctx.zero == 0) } private val JP_PO_nn = Opcode(0xE2,10,3,MNEMONIC_nn("JP PO,%s"),modifyPC = true) { ctx => ctx.jp_cond_nn(ctx.parity == 0) } private val JP_PE_nn = Opcode(0xEA,10,3,MNEMONIC_nn("JP PE,%s"),modifyPC = true) { ctx => ctx.jp_cond_nn(ctx.parity > 0) } private val JP_P_nn = Opcode(0xF2,10,3,MNEMONIC_nn("JP P,%s"),modifyPC = true) { ctx => ctx.jp_cond_nn(ctx.sign == 0) } private val JP_M_nn = Opcode(0xFA,10,3,MNEMONIC_nn("JP M,%s"),modifyPC = true) { ctx => ctx.jp_cond_nn(ctx.sign > 0) } // *** JR e // ************** @inline private def MNEMONIC_jr(pattern:String) = (m:Memory,PC:Int) => pattern.format(hex4(PC + 2 + m.read(PC + 1).asInstanceOf[Byte])) private val JR_e = Opcode(0x18,12,2,MNEMONIC_jr("JR %s"),modifyPC = true) { ctx => ctx.jre_e } // *** JR cc,e // ************** private val JR_C_e = Opcode(0x38,12,2,MNEMONIC_jr("JR C,%s"),modifyPC = true) { ctx => ctx.jr_cond_e(ctx.carry > 0) } private val JR_NC_e = Opcode(0x30,12,2,MNEMONIC_jr("JR NC,%s"),modifyPC = true) { ctx => ctx.jr_cond_e(ctx.carry == 0) } private val JR_Z_e = Opcode(0x28,12,2,MNEMONIC_jr("JR Z,%s"),modifyPC = true) { ctx => ctx.jr_cond_e(ctx.zero > 0) } private val JR_NZ_e = Opcode(0x20,12,2,MNEMONIC_jr("JR NZ,%s"),modifyPC = true) { ctx => ctx.jr_cond_e(ctx.zero == 0) } // *** JP (HL) // ************** private val JP_$HL$ = Opcode(0xE9,4,1,"JP (HL)",modifyPC = true) { ctx => ctx.PC = ctx.HL } // *** JP (IX) // ************** private val JP_$IX$ = Opcode((0xDD,0xE9),8,1,"JP (IX)",modifyPC = true) { ctx => ctx.PC = ctx.INDEX } // *** CALL nn // ************** private val CALL_nn = Opcode(0xCD,17,3,MNEMONIC_nn("CALL %s"),modifyPC = true) { ctx => val tmp = ctx.word(1) ctx.io.internalOperation(1, (ctx.PC + 1) & 0xFFFF) ctx.call(tmp) } // *** CALL cc,nn // ************** private val CALL_C_nn = Opcode(0xDC,10,3,MNEMONIC_nn("CALL C,%s"),modifyPC = true) { ctx => ctx.call_cond_nn(ctx.carry > 0) } private val CALL_NC_nn = Opcode(0xD4,10,3,MNEMONIC_nn("CALL NC,%s"),modifyPC = true) { ctx => ctx.call_cond_nn(ctx.carry == 0) } private val CALL_Z_nn = Opcode(0xCC,10,3,MNEMONIC_nn("CALL Z,%s"),modifyPC = true) { ctx => ctx.call_cond_nn(ctx.zero > 0) } private val CALL_NZ_nn = Opcode(0xC4,10,3,MNEMONIC_nn("CALL NZ,%s"),modifyPC = true) { ctx => ctx.call_cond_nn(ctx.zero == 0) } private val CALL_PO_nn = Opcode(0xE4,10,3,MNEMONIC_nn("CALL PO,%s"),modifyPC = true) { ctx => ctx.call_cond_nn(ctx.parity == 0) } private val CALL_PE_nn = Opcode(0xEC,10,3,MNEMONIC_nn("CALL PE,%s"),modifyPC = true) { ctx => ctx.call_cond_nn(ctx.parity > 0) } private val CALL_P_nn = Opcode(0xF4,10,3,MNEMONIC_nn("CALL P,%s"),modifyPC = true) { ctx => ctx.call_cond_nn(ctx.sign == 0) } private val CALL_M_nn = Opcode(0xFC,10,3,MNEMONIC_nn("CALL M,%s"),modifyPC = true) { ctx => ctx.call_cond_nn(ctx.sign > 0) } // *** DJNZ e // ************** private val DJNZ_e = Opcode(0x10,8,2,MNEMONIC_jr("DJNZ %s"),modifyPC 
= true) { ctx => ctx.djnz } // *** RET // ************** private val RET = Opcode(0xC9,10,1,"RET",modifyPC = true) { ctx => val addr = ctx.pop ; ctx.PC = addr ; ctx.memptr = addr } // *** RET cc // ************** private val RET_C_nn = Opcode(0xD8,5,1,"RET C",modifyPC = true) { ctx => ctx.ret_cond(ctx.carry > 0) } private val RET_NC_nn = Opcode(0xD0,5,1,"RET NC",modifyPC = true) { ctx => ctx.ret_cond(ctx.carry == 0) } private val RET_Z_nn = Opcode(0xC8,5,1,"RET Z",modifyPC = true) { ctx => ctx.ret_cond(ctx.zero > 0) } private val RET_NZ_nn = Opcode(0xC0,5,1,"RET NZ",modifyPC = true) { ctx => ctx.ret_cond(ctx.zero == 0) } private val RET_PO_nn = Opcode(0xE0,5,1,"RET PO",modifyPC = true) { ctx => ctx.ret_cond(ctx.parity == 0) } private val RET_PE_nn = Opcode(0xE8,5,1,"RET PE",modifyPC = true) { ctx => ctx.ret_cond(ctx.parity > 0) } private val RET_P_nn = Opcode(0xF0,5,1,"RET P",modifyPC = true) { ctx => ctx.ret_cond(ctx.sign == 0) } private val RET_M_nn = Opcode(0xF8,5,1,"RET M",modifyPC = true) { ctx => ctx.ret_cond(ctx.sign > 0) } // *** RETI // ************** private val RETI = Opcode((0xED,0x4D),14,2,"RETI",modifyPC = true) { ctx => ctx.retni() } private val RETN = Opcode((0xED,0x45),14,2,"RETN",modifyPC = true,Array(0x55,0x65,0x75,0x5D,0x6D,0x7D)) { ctx => ctx.retni() } // *** RST p // ************** private val RST_0 = Opcode(0xC7,11,1,"RST 0",modifyPC = true) { ctx => ctx.rst(0x00) } private val RST_8 = Opcode(0xCF,11,1,"RST 8",modifyPC = true) { ctx => ctx.rst(0x08) } private val RST_10 = Opcode(0xD7,11,1,"RST 10",modifyPC = true) { ctx => ctx.rst(0x10) } private val RST_18 = Opcode(0xDF,11,1,"RST 18",modifyPC = true) { ctx => ctx.rst(0x18) } private val RST_20 = Opcode(0xE7,11,1,"RST 20",modifyPC = true) { ctx => ctx.rst(0x20) } private val RST_28 = Opcode(0xEF,11,1,"RST 28",modifyPC = true) { ctx => ctx.rst(0x28) } private val RST_30 = Opcode(0xF7,11,1,"RST 30",modifyPC = true) { ctx => ctx.rst(0x30) } private val RST_38 = Opcode(0xFF,11,1,"RST 38",modifyPC = true) { ctx => ctx.rst(0x38) } // ====================================== Input Group ====================================================== // *** IN A,n // ************** private val IN_A_n = Opcode(0xDB,11,2,MNEMONIC_n("IN A,%s")) { ctx => ctx.in_a_n } // *** IN r,(C) // ************** private val IN_A_$C$ = Opcode((0xED,0x78),12,2,"IN A,(C)") { ctx => ctx.A = ctx.in_r_c() } private val IN_B_$C$ = Opcode((0xED,0x40),12,2,"IN B,(C)") { ctx => ctx.B = ctx.in_r_c() } private val IN_C_$C$ = Opcode((0xED,0x48),12,2,"IN C,(C)") { ctx => ctx.C = ctx.in_r_c() } private val IN_D_$C$ = Opcode((0xED,0x50),12,2,"IN D,(C)") { ctx => ctx.D = ctx.in_r_c() } private val IN_E_$C$ = Opcode((0xED,0x58),12,2,"IN E,(C)") { ctx => ctx.E = ctx.in_r_c() } private val IN_H_$C$ = Opcode((0xED,0x60),12,2,"IN H,(C)") { ctx => ctx.H = ctx.in_r_c() } private val IN_L_$C$ = Opcode((0xED,0x68),12,2,"IN L,(C)") { ctx => ctx.L = ctx.in_r_c() } // *** INI // ************** /* * mp := ((c)), (hl) := tmp, hl += 1, b -= 1 => flags, nf := tmp.7, tmp2 := tmp + [[c +/- 1] AND 0xff], pf := parity of [[tmp2 AND 0x07] XOR b], hf := cf := tmp2 > 255 */ private val INI = Opcode((0xED,0xA2),16,2,"INI") { ctx => ctx.ini(inc = true) } // *** INIR // ************** private val INIR = Opcode((0xED,0xB2),16,2,"INIR",modifyPC = true) { ctx => val hl = ctx.HL ctx.ini(true) if (ctx.B == 0) ctx.incPC(2)//ctx.PC = (ctx.PC + 2) & 0xFFFF else { ctx.setAdditionalClockCycles(5) ctx.io.internalOperation(5,hl) } } // *** IND // ************** private val IND = 
Opcode((0xED,0xAA),16,2,"IND") { ctx => ctx.ini(inc = false) } // *** INDR // ************** private val INDR = Opcode((0xED,0xBA),16,2,"INDR",modifyPC = true) { ctx => val hl = ctx.HL ctx.ini(inc = false) if (ctx.B == 0) ctx.incPC(2)//ctx.PC = (ctx.PC + 2) & 0xFFFF else { ctx.setAdditionalClockCycles(5) ctx.io.internalOperation(5,hl) } } // ====================================== Output Group ===================================================== // *** OUT (n),A // ************** private val OUT_$n$_A = Opcode(0xD3,11,2,MNEMONIC_n("OUT (%s),A")) { ctx => val port = ctx.byte(1) ctx.io.out(ctx.A,port,ctx.A) ctx.memptr = (port + 1) & 0xFF | ctx.A << 8 } // *** OUT (C),r // ************** private val OUT_$C$_A = Opcode((0xED,0x79),12,2,"OUT (C),A") { ctx => ctx.out_c_r(ctx.A) } private val OUT_$C$_B = Opcode((0xED,0x41),12,2,"OUT (C),B") { ctx => ctx.out_c_r(ctx.B) } private val OUT_$C$_C = Opcode((0xED,0x49),12,2,"OUT (C),C") { ctx => ctx.out_c_r(ctx.C) } private val OUT_$C$_D = Opcode((0xED,0x51),12,2,"OUT (C),D") { ctx => ctx.out_c_r(ctx.D) } private val OUT_$C$_E = Opcode((0xED,0x59),12,2,"OUT (C),E") { ctx => ctx.out_c_r(ctx.E) } private val OUT_$C$_H = Opcode((0xED,0x61),12,2,"OUT (C),H") { ctx => ctx.out_c_r(ctx.H) } private val OUT_$C$_L = Opcode((0xED,0x69),12,2,"OUT (C),L") { ctx => ctx.out_c_r(ctx.L) } // *** OUTI // ************** private val OUTI = Opcode((0xED,0xA3),16,2,"OUTI") { ctx => ctx.outi(inc = true) } // *** OTIR // ************** private val OTIR = Opcode((0xED,0xB3),16,2,"OTIR",modifyPC = true) { ctx => ctx.outi(inc = true) if (ctx.B == 0) ctx.incPC(2)//ctx.PC = (ctx.PC + 2) & 0xFFFF else { ctx.setAdditionalClockCycles(5) ctx.io.internalOperation(5,ctx.BC) } } // *** OUTD // ************** private val OUTD = Opcode((0xED,0xAB),16,2,"OUTD") { ctx =>ctx.outi(inc = false) } // *** OTDR // ************** private val OTDR = Opcode((0xED,0xBB),16,2,"OTDR",modifyPC = true) { ctx => ctx.outi(inc = false) if (ctx.B == 0) ctx.incPC(2)//ctx.PC = (ctx.PC + 2) & 0xFFFF else { ctx.setAdditionalClockCycles(5) ctx.io.internalOperation(5,ctx.BC) } } // UNDOCUMENTED // ** IN (C) // ************** private val IN$C$ = Opcode((0xED,0x70),12,2,"IN (C)") { ctx => ctx.in_r_c() } // UNDOCUMENTED // ** OUT (C),0 // ************** private val OUT$C$0 = Opcode((0xED,0x71),12,2,"OUT (C),0") { ctx => ctx.out_c_r(0) } // ========================================================================================================= // ====================================== Reflection ======================================================= def initOpcodes : Unit = { if (opcodes_1(0) != null) return val fields = getClass.getDeclaredFields val opcodes = fields filter { _.getType == classOf[Z80.Opcode] } map { f => f.setAccessible(true); f.get(this).asInstanceOf[Opcode] } for(o <- opcodes) { o.opcodes match { case Array(op) => if (opcodes_1(op) == null) opcodes_1(op) = o else { println(s"$op already set"); sys.exit(-1) } case Array(0xED,op) => if (opcodes_ed(op) == null) opcodes_ed(op) = o else { println(s"0xED,$op already set"); sys.exit(-1) } if (o.copyopcodes != null) for(cop <- o.copyopcodes) { // copy the same Opcode to other hex if (opcodes_ed(cop) == null) opcodes_ed(cop) = o.copy(opcodes = Array(0xED,cop))(o.executeFunction) else { println(s"0xED,$op already set"); sys.exit(-1) } } case Array(0xCB,op) => if (opcodes_cb(op) == null) opcodes_cb(op) = o else { println(s"0xCB,$op already set"); sys.exit(-1) } case Array(0xDD,0xCB,op) => if (opcodes_ddcb(op) == null) opcodes_ddcb(op) = o else { 
println(s"0xDD,0xCB,_,$op already set"); sys.exit(-1) } case Array(0xDD,op) => if (opcodes_dd(op) == null) opcodes_dd(op) = o else { println(s"0xDD,$op already set"); sys.exit(-1) } case x => println(s"Fatal error: opcodes ${x.mkString(",")} unknown") sys.exit(-1) } } // FD copy for DD and DDCB for(o <- 0 to 1; i <- 0 to 0xFF) { val opcode = o match { case 0 => opcodes_dd(i) case 1 => opcodes_ddcb(i) } if (opcode != null) { val codes = Array.ofDim[Int](opcode.opcodes.length) System.arraycopy(opcode.opcodes,0,codes,0,codes.length) codes(0) = 0xFD val mnemonic = (m : Memory, v : Int) => { val s = opcode.getMnemonic(m,v) s.replaceAll("IX","IY") } o match { case 0 => opcodes_fd(i) = Opcode(codes,opcode.cycles,opcode.size,mnemonic,opcode.modifyPC)(new FD(opcode.executeFunction)) case 1 => opcodes_fdcb(i) = Opcode(codes,opcode.cycles,opcode.size,mnemonic,opcode.modifyPC)(new FD(opcode.executeFunction)) } } } // CB undocumented val regMnem = Array("B","C","D","E","H","L","_","A") for(o <- List(0xDD,0xFD)) { val opcodes = o match { case 0xDD => opcodes_ddcb case 0xFD => opcodes_fdcb } for (y <- 0 to 0xF) { for (c <- List(0x06,0x0E)) { val opcode = opcodes(y << 4 | c) val range = if (c == 0x06) (0 to 5) ++ (0x07 to 0x07) else (0x08 to 0x0D) ++ (0x0F to 0x0F) for(x <- range) { val code = y << 4 | x if (y >= 4 && y <= 7) { // BIT if (opcodes(code) == null) opcodes(code) = Opcode((o,0xCB,code),opcode.cycles,opcode.size,opcode.getMnemonic)(ctx => { opcode.executeFunction(ctx) }) else { println(s"CB Undocumented ${o.toHexString} 0xCB ${code.toHexString} already set"); sys.exit(-1) } } else { val reg = x & 7 val f = (ctx:Context) => { opcode.executeFunction(ctx) reg match { case 0 => ctx.B = ctx.lastWrite case 1 => ctx.C = ctx.lastWrite case 2 => ctx.D = ctx.lastWrite case 3 => ctx.E = ctx.lastWrite case 4 => ctx.H = ctx.lastWrite case 5 => ctx.L = ctx.lastWrite case 7 => ctx.A = ctx.lastWrite } } val mnem = (m:Memory,s:Int) => s"${opcode.getMnemonic(m,s)},${regMnem(reg)}" if (opcodes(code) == null) opcodes(code) = Opcode((o,0xCB,code),opcode.cycles,opcode.size,mnem)(f) else { println(s"CB Undocumented ${o.toHexString} 0xCB ${code.toHexString} already set"); sys.exit(-1) } } } } } } } // ========================================================================================================= } /** * @author ealeame */ class Z80(mem:Memory, io_memory:Z80.IOMemory = null, trapListener : Z80.Context => Unit = null, undocHandler : Z80.Context => Int = null) extends Chip with TraceListener { val id: ID = ChipID.CPU override lazy val componentID = "Z80" import Z80._ val ctx = new Context(mem,io_memory) final val M1FETCH_PIN = 1 final val REFRESH_PIN = 2 final val DUMMY_READ_PIN = 4 private[this] var irqLow,nmiLow,nmiOnNegativeEdge = false private[this] var im2LowByte = 0 private[this] var M1Fetch,refresh,dummyRead = false private[this] var cpuWaitUntil = 0L private[this] var cpuRestCycles = 0.0 private[this] var busREQ = false private[this] var tracing = false private[this] var stepCallBack : CpuStepInfo => Unit = _ private[this] val syncObject = new Object private[this] var breakCallBack : CpuStepInfo => Unit = _ private[this] var breakType : BreakType = _ private[this] var lastPC = 0 override def getProperties: Properties = { properties.setProperty("Context",ctx.toString) properties.setProperty("IRQ pending",irqLow.toString) properties.setProperty("NMI pending",nmiLow.toString) properties } // =================================== Tracing ============================================================= def 
setCycleMode(cycleMode: Boolean): Unit = {} def setTraceOnFile(out:PrintWriter,enabled:Boolean) : Unit = { // TODO } def setTrace(traceOn:Boolean): Unit = tracing = traceOn def step(updateRegisters: CpuStepInfo => Unit) : Unit = { stepCallBack = updateRegisters syncObject.synchronized { syncObject.notify } } def setBreakAt(breakType:BreakType,callback:CpuStepInfo => Unit) : Unit = { tracing = false breakCallBack = callback this.breakType = breakType match { case NoBreak => null case _ => breakType } } def jmpTo(pc:Int) : Unit = { ctx.PC = pc & 0xFFFF } def disassemble(mem:Memory,address:Int) : (String,Int) = { try { dummyRead = true val adr = Array(address) val opcode = fetch(adr) (opcode.disassemble(mem, adr(0)), opcode.size) } finally { dummyRead = false } } def getLastPC : Int = lastPC // =================================== Interrupt Handling ================================================== final def irq(low:Boolean,im2LowByte : Int = 0): Unit = { irqLow = low this.im2LowByte = im2LowByte } final def nmi(low:Boolean) : Unit = { if (!nmiLow && low) { nmiOnNegativeEdge = true } nmiLow = low } // ======================================== Bus Request (threee state) ===================================== def requestBUS(request:Boolean) = busREQ = request // ======================================== Fetch & Execute ================================================ def isM1Fetch : Boolean = M1Fetch def isRefresh : Boolean = refresh def isDummyRead : Boolean = dummyRead def pins : Int = (if (M1Fetch) M1FETCH_PIN else 0) | (if (refresh) REFRESH_PIN else 0) | (if (dummyRead) DUMMY_READ_PIN else 0) def init : Unit = { Log.info("Z80 initializing opcodes...") Z80.initOpcodes } def reset : Unit = { ctx.reset irqLow = false nmiLow = false nmiOnNegativeEdge = false M1Fetch = false refresh = false dummyRead = false } @inline private[this] def fetch(addr:Array[Int] = null) : Opcode = { M1Fetch = true try { val pc = if (addr == null) ctx.PC else addr(0) val op = mem.read(pc) if (addr == null) ctx.incR(1) refreshCycle // single opcode val opcode = opcodes_1(op) if (opcode != null) return opcode // extended val op1 = mem.read((pc + 1) & 0xFFFF) if (addr == null) ctx.incR(1) refreshCycle // ED if (op == 0xED) { val op2 = opcodes_ed(op1) if (op2 == null) { ctx.incPC()//ctx.PC = (ctx.PC + 1) & 0xFFFF ctx.setAdditionalClockCycles(4) return NOP } else return op2 } // CB if (op == 0xCB) return opcodes_cb(op1) // DD if (op == 0xDD || op == 0xFD) { var opcodes_xxcb,opcodes_xx : Array[Opcode] = null if (op == 0xDD) { opcodes_xxcb = opcodes_ddcb opcodes_xx = opcodes_dd } else { opcodes_xxcb = opcodes_fdcb opcodes_xx = opcodes_fd } if (op1 == 0xCB) { val lastDummy = dummyRead dummyRead = true val op2 = mem.read((pc + 3) & 0xFFFF) dummyRead = lastDummy return opcodes_xxcb(op2) } else { opcodes_xx(op1) match { case null => // skip prefix if (addr == null) { ctx.incPC()//ctx.PC = (ctx.PC + 1) & 0xFFFF ctx.setAdditionalClockCycles(4) } else addr(0) = (pc + 1) & 0xFFFF val c2 = opcodes_1(op1) if (c2 == null) return NOP // if op1 is DD or FD else return c2 case xxcode => return xxcode } } } null } finally { M1Fetch = false } } @inline private def refreshCycle : Unit = { refresh = true val refreshAddress = ctx.I << 8 | ctx.R & 0x7F mem.read(refreshAddress) refresh = false } @inline private def interruptMode0Handling : Unit = { // RST 38 ctx.PC = 0x38 } @inline private def interruptMode2Handling : Unit = { val addr = ctx.I << 8 | im2LowByte ctx.PC = (mem.read(addr + 1) << 8) | mem.read(addr) } final def clock : Int = { if 
(breakType != null && breakType.isBreak(ctx.PC,false,false)) { tracing = true breakCallBack(CpuStepInfo(ctx.PC,ctx.toString)) } if ((irqLow || nmiOnNegativeEdge) && !ctx.mustDelayInt) { // any interrupt pending ? ctx.lastQ = false if (nmiOnNegativeEdge) { // NMI if (ctx.halted) { ctx.halted = false ctx.incPC()//ctx.PC = (ctx.PC + 1) & 0xFFFF } ctx.io.internalOperation(5) ctx.push(ctx.PC) ctx.incR(1) refreshCycle if (breakType != null && breakType.isBreak(ctx.PC,false,true)) { tracing = true breakCallBack(CpuStepInfo(ctx.PC,ctx.toString)) Log.debug("NMI Break") } nmiOnNegativeEdge = false ctx.IFF2 = ctx.IFF1 ctx.IFF1 = 0 ctx.PC = 0x0066 return 11 } else { // IRQ if (ctx.IFF1 == 1) { if (ctx.halted) { ctx.halted = false ctx.incPC()//ctx.PC = (ctx.PC + 1) & 0xFFFF } ctx.im match { case 0 => ctx.io.internalOperation(6) case 1|2 => ctx.io.internalOperation(7) } ctx.push(ctx.PC) ctx.incR(1) if (breakType != null && breakType.isBreak(ctx.PC,true,false)) { tracing = true breakCallBack(CpuStepInfo(ctx.PC,ctx.toString)) Log.debug("IRQ Break") } ctx.IFF1 = 0 ctx.IFF2 = 0 ctx.im match { case 0 => interruptMode0Handling return 12 case 1 => ctx.PC = 0x38 return 13 case 2 => interruptMode2Handling return 19 } return 0 } } } if (trapListener != null) trapListener(ctx) ctx.setDelayInt(false) val opcode = fetch() if (opcode == null) { if (undocHandler == null) throw new IllegalArgumentException(s"Can't find opcode at ${hex4(ctx.PC)}: ${hex2(mem.read(ctx.PC))} ${hex2(mem.read(ctx.PC + 1))} ${hex2(mem.read(ctx.PC + 2))}") else { return undocHandler(ctx) } } // tracing if (tracing) { try { dummyRead = true Log.debug("[Z80] " + opcode.disassemble(mem, ctx.PC)) } finally { dummyRead = false } stepCallBack(CpuStepInfo(ctx.PC,ctx.toString)) syncObject.synchronized { syncObject.wait } } // execute lastPC = ctx.PC ctx.copyQ opcode.executeFunction(ctx) val clocks = opcode.cycles + ctx.getAdditionalClockSycles if (!opcode.modifyPC) ctx.incPC(opcode.size)//ctx.PC = (ctx.PC + opcode.size) & 0xFFFF clocks } // ======================================== Clock ========================================================== final def clock(cycles:Long,scaleFactor:Double = 1) : Unit = { val canExecCPU = cycles > cpuWaitUntil && !busREQ if (canExecCPU) { val nextCPUCycles = cpuRestCycles + cycles + (clock - 1) / scaleFactor cpuWaitUntil = nextCPUCycles.toInt cpuRestCycles = nextCPUCycles - cpuWaitUntil } } // state override protected def saveState(out:ObjectOutputStream) : Unit = { out.writeBoolean(irqLow) out.writeBoolean(nmiLow) out.writeBoolean(nmiOnNegativeEdge) out.writeLong(cpuWaitUntil) ctx.saveState(out) } override protected def loadState(in:ObjectInputStream) : Unit = { irqLow = in.readBoolean nmiLow = in.readBoolean nmiOnNegativeEdge = in.readBoolean cpuWaitUntil = in.readLong ctx.loadState(in) } override protected def allowsStateRestoring : Boolean = true }
abbruzze/kernal64
Kernal64/src/ucesoft/cbm/cpu/Z80.scala
Scala
mit
128,691
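The Z80.scala record above drives execution through per-prefix lookup tables of Opcode values (opcodes_1, opcodes_cb, opcodes_ed, ...), each bundling the encoding, cycle count, instruction size, mnemonic and an execute function, and its clock method fetches from the table, runs the handler and advances PC. The sketch below is a minimal, self-contained illustration of that table-driven dispatch pattern; every name in it (Cpu, Op, MiniDispatch, the two sample instructions) is hypothetical and greatly simplified, not the emulator's actual API.

// Hypothetical, simplified sketch of table-driven opcode dispatch (not the emulator's real classes).
final case class Cpu(var a: Int = 0, var pc: Int = 0)

// Like the emulator's Opcode: metadata in the first parameter list, handler in the second.
final case class Op(code: Int, cycles: Int, size: Int, mnemonic: String)(val exec: Cpu => Unit)

object MiniDispatch {
  // Sparse lookup table indexed by the first opcode byte, analogous to opcodes_1 above.
  private val table = new Array[Op](256)

  private def register(op: Op): Unit = {
    require(table(op.code) == null, s"opcode ${op.code} already set")
    table(op.code) = op
  }

  register(Op(0x00, 4, 1, "NOP") { _ => () })
  register(Op(0x3C, 4, 1, "INC A") { cpu => cpu.a = (cpu.a + 1) & 0xFF })

  // Fetch, execute, advance PC, and report the cycles consumed (as the real clock method does).
  def step(cpu: Cpu, mem: Array[Int]): Int = {
    val op = table(mem(cpu.pc))
    op.exec(cpu)
    cpu.pc = (cpu.pc + op.size) & 0xFFFF
    op.cycles
  }

  def main(args: Array[String]): Unit = {
    val cpu = Cpu()
    val mem = Array(0x3C, 0x00) // INC A ; NOP
    val cycles = step(cpu, mem) + step(cpu, mem)
    println(s"A=${cpu.a}, PC=${cpu.pc}, cycles=$cycles") // A=1, PC=2, cycles=8
  }
}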
package org.moe.runtime

import scala.collection.mutable.{HashMap,Map}

import org.moe.runtime.nativeobjects.MoePairObject

class MoeSignature(
    private val params: List[MoeParameter] = List()
  ) extends MoeObject {

  lazy val arity = params.length

  lazy val namedParameterMap: Map[String,MoeParameter] = Map(
    params.filter(_ match {
      case (x: MoeNamedParameter) => true
      case _ => false
    }).map(
      p => p.getKeyName -> p
    ):_*
  )

  private def checkType (n: String, o: MoeObject) = {
    if (!MoeType.checkType(n, o))
      throw new MoeErrors.IncompatibleType(
        "the argument (" + n + ") is not compatible with " + o.getAssociatedType.map(_.getName).getOrElse("NO TYPE")
      )
  }

  def getParams = params

  def bindArgsToEnv (args: MoeArguments, env: MoeEnvironment) = {
    val r = env.getCurrentRuntime.get

    var extra: List[MoeObject] = List()

    for (i <- 0.until(arity)) {
      params(i) match {
        case MoePositionalParameter(name) => {
          val arg = args.getArgAt(i).getOrElse(
            throw new MoeErrors.MissingParameter(name)
          )
          checkType(name, arg)
          env.create(name, arg)
        }
        case MoeOptionalParameter(name) => args.getArgAt(i) match {
          case Some(a) => {
            checkType(name, a)
            env.create(name, a)
          }
          case None => {
            // no need to check type because
            // we know that undef will pass
            env.create(name, r.NativeObjects.getUndef)
          }
        }
        case MoeSlurpyParameter(name) => env.create(
          name,
          r.NativeObjects.getArray(args.slurpArgsAt(i):_*)
        )
        case MoeSlurpyNamedParameter(name) => env.create(
          name,
          r.NativeObjects.getHash(args.slurpArgsAt(i).map(_.unboxToTuple.get):_*)
        )
        case MoeDefaultValueParameter(name, value) => args.getArgAt(i) match {
          case Some(a) => {
            checkType(name, a)
            env.create(name, a)
          }
          case None => {
            env.create(name, value)
          }
        }
        case _ => extra = args.getArgAt(i).get :: extra
      }
    }

    if (!args.wereAllArgsConsumed) {
      throw new MoeErrors.InvalidParameter(
        "Not all arguments were consumed " + args.consumedArgCount +
        " were consumed, but we had " + args.getArgCount +
        " with signature (" + params.map(_.getName).mkString(", ") + ")"
      )
    }

    for (arg <- extra) {
      arg match {
        case (a: MoePairObject) => {
          val k = a.key(r).unboxToString.get
          val p = namedParameterMap.get(k).getOrElse(
            throw new MoeErrors.MissingParameter("Could not find matching parameter key for " + k)
          )
          val v = a.value(r)
          val n = p.getName
          checkType(n, v)
          env.create(n, v)
        }
        case _ => throw new MoeErrors.IncompatibleType("argument was not a pair")
      }
    }
  }
}
MoeOrganization/moe
src/main/scala/org/moe/runtime/MoeSignature.scala
Scala
mit
3,061
/**
 *
 * Copyright (C) 2017 University of Bamberg, Software Technologies Research Group
 * <https://www.uni-bamberg.de/>, <http://www.swt-bamberg.de/>
 *
 * This file is part of the Data Structure Investigator (DSI) project, which received financial support by the
 * German Research Foundation (DFG) under grant no. LU 1748/4-1, see
 * <http://www.swt-bamberg.de/dsi/>.
 *
 * DSI is licensed under the GNU GENERAL PUBLIC LICENSE (Version 3), see
 * the LICENSE file at the project's top-level directory for details or consult <http://www.gnu.org/licenses/>.
 *
 * DSI is free software: you can redistribute it and/or modify it under the
 * terms of the GNU General Public License as published by the Free Software
 * Foundation, either version 3 of the License, or any later version.
 *
 * DSI is a RESEARCH PROTOTYPE and distributed WITHOUT ANY
 * WARRANTY, without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * The following people contributed to the conception and realization of the present DSI distribution (in
 * alphabetic order by surname):
 *
 * - Jan H. Boockmann
 * - Gerald Lüttgen
 * - Thomas Rupprecht
 * - David H. White
 *
 */

/**
 * @author DSI
 *
 * DsOliEPT.scala created on Jan 28, 2015
 *
 * Description: Represents an entry pointer tag
 * used for operation detection
 */
package entrypoint

import scala.collection.mutable.ListBuffer
import pointstograph.DsOliVertexMemory
import entrypoint.Feature._
import extlogger.DsOliLogger
import scala.collection.mutable.HashMap

/**
 * @author DSI
 *
 * @constructor creates an entry pointer tag
 * @param epVertex the entry pointer vertex
 * @param epVertexId the id of the entry pointer vertex
 * @param oep the offset where the pointer originates at ep vertex
 * @param Aup most upstream cell
 * @param creationTime time step of the event trace when this EPT was created
 * @param Qf list of observed features for this EPT
 */
class DsOliEPT(val id: Long, var epVertex: DsOliVertexMemory, var epVertexId: Long, var oep: Long, var Aup: DsOliEPTCell, var creationTime: Long, var Qf: ListBuffer[Feature]) {

  // Store possible artificial features for a particular time step
  var artificialQfs = new HashMap[Int, ListBuffer[Feature]]()

  val classSignature = "DsOliEPT::"

  def this(epVertex: DsOliVertexMemory, epVertexId: Long, oep: Long, Aup: DsOliEPTCell, creationTime: Long, Qf: ListBuffer[Feature]) =
    this(DsOliEPT.getId, epVertex, epVertexId, oep, Aup, creationTime, Qf)

  /**
   * Deep copy of an EPT, where each feature
   * of the feature list is copied over
   *
   * @return a copy of the EPT
   */
  def deepCopy(): DsOliEPT = {
    val funSignature = classSignature + "deepCopy: "
    DsOliLogger.debug(funSignature + "entered.")
    val newQf = this.Qf.map(f => identity(f))
    DsOliLogger.debug(funSignature + "newQf: " + newQf)
    return new DsOliEPT(this.id, this.epVertex, this.epVertexId, this.oep, this.Aup, this.creationTime, newQf)
  }

  override def toString(): String = {
    "[" + this.getClass() + ": epVertexId = " + epVertexId + "," +
      "oep = " + oep.toHexString + "," +
      "Aup = " + Aup + "," +
      "creationTime = " + creationTime.toHexString + "," +
      "Qf = " + Qf + "]"
  }
}

object DsOliEPT {

  type EPTId = Long

  var id: Long = 0

  /**
   * Simple unique EPT ID generator
   * @return unique EPT ID
   */
  def getId(): Long = {
    val retId = id;
    id += 1
    return retId
  }
}
uniba-swt/DSIsrc
src/entrypoint/DsOliEPT.scala
Scala
gpl-3.0
3,520
object Test:
  extension [A](a: A) def render: String = "Hi"
  extension [B](b: B) def render(using DummyImplicit): Char = 'x'

val test = {
  42.render // error
  Test.render(42) // error
}
lampepfl/dotty
tests/neg/i6183.scala
Scala
apache-2.0
199
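The i6183.scala record above is a negative test: two generic extension methods both named render collide, so both call sites are marked // error. The sketch below is only an illustrative companion, not part of the dotty test suite, and the names Disambiguated and renderChar are invented for it; it shows that the clash disappears once the second extension gets a distinct name.

// Hypothetical companion sketch: renaming one overload makes both extensions usable.
object Disambiguated:
  extension [A](a: A) def render: String = "Hi"
  extension [B](b: B) def renderChar(using DummyImplicit): Char = 'x'

@main def disambiguatedDemo(): Unit =
  import Disambiguated.*
  println(42.render)     // Hi
  println(42.renderChar) // x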
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.execution.datasources.v2

import org.apache.spark.sql.catalyst.expressions.{Expression, SortOrder}
import org.apache.spark.sql.catalyst.expressions.V2ExpressionUtils.toCatalyst
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, RepartitionByExpression, Sort}
import org.apache.spark.sql.connector.distributions.{ClusteredDistribution, OrderedDistribution, UnspecifiedDistribution}
import org.apache.spark.sql.connector.write.{RequiresDistributionAndOrdering, Write}
import org.apache.spark.sql.errors.QueryCompilationErrors
import org.apache.spark.sql.internal.SQLConf

object DistributionAndOrderingUtils {

  def prepareQuery(write: Write, query: LogicalPlan, conf: SQLConf): LogicalPlan = write match {
    case write: RequiresDistributionAndOrdering =>
      val numPartitions = write.requiredNumPartitions()

      val distribution = write.requiredDistribution match {
        case d: OrderedDistribution => d.ordering.map(e => toCatalyst(e, query))
        case d: ClusteredDistribution => d.clustering.map(e => toCatalyst(e, query))
        case _: UnspecifiedDistribution => Array.empty[Expression]
      }

      val queryWithDistribution = if (distribution.nonEmpty) {
        val finalNumPartitions = if (numPartitions > 0) {
          numPartitions
        } else {
          conf.numShufflePartitions
        }
        // the conversion to catalyst expressions above produces SortOrder expressions
        // for OrderedDistribution and generic expressions for ClusteredDistribution
        // this allows RepartitionByExpression to pick either range or hash partitioning
        RepartitionByExpression(distribution, query, finalNumPartitions)
      } else if (numPartitions > 0) {
        throw QueryCompilationErrors.numberOfPartitionsNotAllowedWithUnspecifiedDistributionError()
      } else {
        query
      }

      val ordering = write.requiredOrdering.toSeq
        .map(e => toCatalyst(e, query))
        .asInstanceOf[Seq[SortOrder]]

      val queryWithDistributionAndOrdering = if (ordering.nonEmpty) {
        Sort(ordering, global = false, queryWithDistribution)
      } else {
        queryWithDistribution
      }

      queryWithDistributionAndOrdering

    case _ =>
      query
  }
}
ueshin/apache-spark
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DistributionAndOrderingUtils.scala
Scala
apache-2.0
3,065
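A data source opts into the preparation step above by having its Write implement RequiresDistributionAndOrdering. The sketch below is hypothetical (class and column names are invented, and it assumes the Distributions/Expressions factory methods shipped with the same Spark version); prepareQuery would then wrap the query in RepartitionByExpression and Sort as shown in the file above.

import org.apache.spark.sql.connector.distributions.{Distribution, Distributions}
import org.apache.spark.sql.connector.expressions.{Expression, Expressions, SortDirection, SortOrder}
import org.apache.spark.sql.connector.write.{RequiresDistributionAndOrdering, Write}

// Hypothetical Write that asks Spark to cluster rows by "key" and sort them by "ts".
class BucketedWrite extends Write with RequiresDistributionAndOrdering {
  override def requiredDistribution(): Distribution =
    Distributions.clustered(Array[Expression](Expressions.identity("key")))

  override def requiredOrdering(): Array[SortOrder] =
    Array(Expressions.sort(Expressions.column("ts"), SortDirection.ASCENDING))

  // 0 means no preference: prepareQuery then falls back to spark.sql.shuffle.partitions.
  override def requiredNumPartitions(): Int = 0
}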
/* * Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com> */ package play.routes.compiler import java.io.File import java.nio.charset.Charset import org.apache.commons.io.FileUtils import scala.util.parsing.combinator._ import scala.util.parsing.input._ import scala.language.postfixOps object RoutesFileParser { /** * Parse the given routes file * * @param routesFile The routes file to parse * @return Either the list of compilation errors encountered, or a list of routing rules */ def parse(routesFile: File): Either[Seq[RoutesCompilationError], List[Rule]] = { val routesContent = FileUtils.readFileToString(routesFile, Charset.defaultCharset()) parseContent(routesContent, routesFile) } /** * Parse the given routes file content * * @param routesContent The content of the routes file * @param routesFile The routes file (used for error reporting) * @return Either the list of compilation errors encountered, or a list of routing rules */ def parseContent(routesContent: String, routesFile: File): Either[Seq[RoutesCompilationError], List[Rule]] = { val parser = new RoutesFileParser() parser.parse(routesContent) match { case parser.Success(parsed: List[Rule], _) => validate(routesFile, parsed.collect { case r: Route => r }) match { case Nil => Right(parsed) case errors => Left(errors) } case parser.NoSuccess(message, in) => Left(Seq(RoutesCompilationError(routesFile, message, Some(in.pos.line), Some(in.pos.column)))) } } /** * Validate the routes file */ private def validate(file: java.io.File, routes: List[Route]): Seq[RoutesCompilationError] = { import scala.collection.mutable._ val errors = ListBuffer.empty[RoutesCompilationError] routes.foreach { route => if (route.call.packageName.isEmpty) { errors += RoutesCompilationError( file, "Missing package name", Some(route.call.pos.line), Some(route.call.pos.column)) } if (route.call.controller.isEmpty) { errors += RoutesCompilationError( file, "Missing Controller", Some(route.call.pos.line), Some(route.call.pos.column)) } route.path.parts.collect { case part @ DynamicPart(name, regex, _) => { route.call.parameters.getOrElse(Nil).find(_.name == name).map { p => if (p.fixed.isDefined || p.default.isDefined) { errors += RoutesCompilationError( file, "It is not allowed to specify a fixed or default value for parameter: '" + name + "' extracted from the path", Some(p.pos.line), Some(p.pos.column)) } try { java.util.regex.Pattern.compile(regex) } catch { case e: Exception => { errors += RoutesCompilationError( file, e.getMessage, Some(part.pos.line), Some(part.pos.column)) } } }.getOrElse { errors += RoutesCompilationError( file, "Missing parameter in call definition: " + name, Some(part.pos.line), Some(part.pos.column)) } } } } // make sure there are no routes using overloaded handler methods, or handler methods with default parameters without declaring them all val sameHandlerMethodGroup = routes.groupBy { r => r.call.packageName + r.call.controller + r.call.method } val sameHandlerMethodParameterCountGroup = sameHandlerMethodGroup.groupBy { g => (g._1, g._2.groupBy(route => route.call.parameters.map(p => p.length).getOrElse(0))) } sameHandlerMethodParameterCountGroup.find(g => g._1._2.size > 1).foreach { overloadedRouteGroup => val firstOverloadedRoute = overloadedRouteGroup._2.values.head.head errors += RoutesCompilationError( file, "Using different overloaded methods is not allowed. 
If you are using a single method in combination with default parameters, make sure you declare them all explicitly.", Some(firstOverloadedRoute.call.pos.line), Some(firstOverloadedRoute.call.pos.column) ) } errors.toList } } /** * The routes file parser */ private[routes] class RoutesFileParser extends JavaTokenParsers { override def skipWhitespace = false override val whiteSpace = """[ \\t]+""".r def EOF: util.matching.Regex = "\\\\z".r def namedError[A](p: Parser[A], msg: String): Parser[A] = Parser[A] { i => p(i) match { case Failure(_, in) => Failure(msg, in) case o => o } } def several[T](p: => Parser[T]): Parser[List[T]] = Parser { in => import scala.collection.mutable.ListBuffer val elems = new ListBuffer[T] def continue(in: Input): ParseResult[List[T]] = { val p0 = p // avoid repeatedly re-evaluating by-name parser @scala.annotation.tailrec def applyp(in0: Input): ParseResult[List[T]] = p0(in0) match { case Success(x, rest) => elems += x; applyp(rest) case Failure(_, _) => Success(elems.toList, in0) case err: Error => err } applyp(in) } continue(in) } def separator: Parser[String] = namedError(whiteSpace, "Whitespace expected") def ignoreWhiteSpace: Parser[Option[String]] = opt(whiteSpace) // This won't be needed when we upgrade to Scala 2.11, we will then be able to use JavaTokenParser.ident: // https://github.com/scala/scala/pull/1466 def javaIdent: Parser[String] = """\\p{javaJavaIdentifierStart}\\p{javaJavaIdentifierPart}*""".r def tickedIdent: Parser[String] = """`[^`]+`""".r def identifier: Parser[String] = namedError(javaIdent, "Identifier expected") def tickedIdentifier: Parser[String] = namedError(tickedIdent, "Identifier expected") def end: util.matching.Regex = """\\s*""".r def comment: Parser[Comment] = "#" ~> ".*".r ^^ { case c => Comment(c) } def newLine: Parser[String] = namedError((("\\r"?) 
~> "\\n"), "End of line expected") def blankLine: Parser[Unit] = ignoreWhiteSpace ~> newLine ^^ { case _ => () } def parentheses: Parser[String] = { "(" ~ (several((parentheses | not(")") ~> """.""".r))) ~ commit(")") ^^ { case p1 ~ charList ~ p2 => p1 + charList.mkString + p2 } } def brackets: Parser[String] = { "[" ~ (several((parentheses | not("]") ~> """.""".r))) ~ commit("]") ^^ { case p1 ~ charList ~ p2 => p1 + charList.mkString + p2 } } def string: Parser[String] = { "\\"" ~ (several((parentheses | not("\\"") ~> """.""".r))) ~ commit("\\"") ^^ { case p1 ~ charList ~ p2 => p1 + charList.mkString + p2 } } def multiString: Parser[String] = { "\\"\\"\\"" ~ (several((parentheses | not("\\"\\"\\"") ~> """.""".r))) ~ commit("\\"\\"\\"") ^^ { case p1 ~ charList ~ p2 => p1 + charList.mkString + p2 } } def httpVerb: Parser[HttpVerb] = namedError("GET" | "POST" | "PUT" | "PATCH" | "HEAD" | "DELETE" | "OPTIONS", "HTTP Verb expected") ^^ { case v => HttpVerb(v) } def singleComponentPathPart: Parser[DynamicPart] = (":" ~> identifier) ^^ { case name => DynamicPart(name, """[^/]+""", encode = true) } def multipleComponentsPathPart: Parser[DynamicPart] = ("*" ~> identifier) ^^ { case name => DynamicPart(name, """.+""", encode = false) } def regexComponentPathPart: Parser[DynamicPart] = "$" ~> identifier ~ ("<" ~> (not(">") ~> """[^\\s]""".r +) <~ ">" ^^ { case c => c.mkString }) ^^ { case name ~ regex => DynamicPart(name, regex, encode = false) } def staticPathPart: Parser[StaticPart] = (not(":") ~> not("*") ~> not("$") ~> """[^\\s]""".r +) ^^ { case chars => StaticPart(chars.mkString) } def path: Parser[PathPattern] = "/" ~ ((positioned(singleComponentPathPart) | positioned(multipleComponentsPathPart) | positioned(regexComponentPathPart) | staticPathPart) *) ^^ { case _ ~ parts => PathPattern(parts) } def space(s: String): Parser[String] = (ignoreWhiteSpace ~> s <~ ignoreWhiteSpace) def parameterType: Parser[String] = ":" ~> ignoreWhiteSpace ~> simpleType def simpleType: Parser[String] = { ((stableId <~ ignoreWhiteSpace) ~ opt(typeArgs)) ^^ { case sid ~ ta => sid.toString + ta.getOrElse("") } | (space("(") ~ types ~ space(")")) ^^ { case _ ~ b ~ _ => "(" + b + ")" } } def typeArgs: Parser[String] = { (space("[") ~ types ~ space("]") ~ opt(typeArgs)) ^^ { case _ ~ ts ~ _ ~ ta => "[" + ts + "]" + ta.getOrElse("") } | (space("#") ~ identifier ~ opt(typeArgs)) ^^ { case _ ~ id ~ ta => "#" + id + ta.getOrElse("") } } def types: Parser[String] = rep1sep(simpleType, space(",")) ^^ (_ mkString ",") def stableId: Parser[String] = rep1sep(identifier, space(".")) ^^ (_ mkString ".") def expression: Parser[String] = (multiString | string | parentheses | brackets | """[^),?=\\n]""".r +) ^^ { case p => p.mkString } def parameterFixedValue: Parser[String] = "=" ~ ignoreWhiteSpace ~ expression ^^ { case a ~ _ ~ b => a + b } def parameterDefaultValue: Parser[String] = "?=" ~ ignoreWhiteSpace ~ expression ^^ { case a ~ _ ~ b => a + b } def parameter: Parser[Parameter] = ((identifier | tickedIdentifier) <~ ignoreWhiteSpace) ~ opt(parameterType) ~ (ignoreWhiteSpace ~> opt(parameterDefaultValue | parameterFixedValue)) ^^ { case name ~ t ~ d => Parameter(name, t.getOrElse("String"), d.filter(_.startsWith("=")).map(_.drop(1)), d.filter(_.startsWith("?")).map(_.drop(2))) } def parameters: Parser[List[Parameter]] = "(" ~> repsep(ignoreWhiteSpace ~> positioned(parameter) <~ ignoreWhiteSpace, ",") <~ ")" // Absolute method consists of a series of Java identifiers representing the package name, controller and method. 
// Since the Scala parser is greedy, we can't easily extract this out, so just parse at least 3 def absoluteMethod: Parser[List[String]] = namedError(javaIdent ~ "." ~ javaIdent ~ "." ~ rep1sep(javaIdent, ".") ^^ { case first ~ _ ~ second ~ _ ~ rest => first :: second :: rest }, "Controller method call expected") def call: Parser[HandlerCall] = opt("@") ~ absoluteMethod ~ opt(parameters) ^^ { case instantiate ~ absMethod ~ parameters => { val (packageParts, classAndMethod) = absMethod.splitAt(absMethod.size - 2) val packageName = packageParts.mkString(".") val className = classAndMethod(0) val methodName = classAndMethod(1) val dynamic = !instantiate.isEmpty HandlerCall(packageName, className, dynamic, methodName, parameters) } } def router: Parser[String] = rep1sep(identifier, ".") ^^ { case parts => parts.mkString(".") } def route = httpVerb ~! separator ~ path ~ separator ~ positioned(call) ~ ignoreWhiteSpace ^^ { case v ~ _ ~ p ~ _ ~ c ~ _ => Route(v, p, c) } def include = "->" ~! separator ~ path ~ separator ~ router ~ ignoreWhiteSpace ^^ { case _ ~ _ ~ p ~ _ ~ r ~ _ => Include(p.toString, r) } def sentence: Parser[Product with Serializable] = namedError((comment | positioned(include) | positioned(route)), "HTTP Verb (GET, POST, ...), include (->) or comment (#) expected") <~ (newLine | EOF) def parser: Parser[List[Rule]] = phrase((blankLine | sentence *) <~ end) ^^ { case routes => routes.reverse.foldLeft(List[(Option[Rule], List[Comment])]()) { case (s, r @ Route(_, _, _, _)) => (Some(r), List()) :: s case (s, i @ Include(_, _)) => (Some(i), List()) :: s case (s, c @ ()) => (None, List()) :: s case ((r, comments) :: others, c @ Comment(_)) => (r, c :: comments) :: others case (s, _) => s }.collect { case (Some(r @ Route(_, _, _, _)), comments) => r.copy(comments = comments).setPos(r.pos) case (Some(i @ Include(_, _)), _) => i } } def parse(text: String): ParseResult[List[Rule]] = { parser(new CharSequenceReader(text)) } }
ktoso/playframework
framework/src/routes-compiler/src/main/scala/play/routes/compiler/RoutesFileParser.scala
Scala
apache-2.0
12,137
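A small usage sketch of the parser above; the routes text is an ordinary Play routes snippet and the File argument is only used for error reporting.

import java.io.File
import play.routes.compiler.RoutesFileParser

object ParseRoutesSketch extends App {
  val routesText =
    """# example routes
      |GET     /clients/:id      controllers.Clients.show(id: Long)
      |->      /api              api.Routes
      |""".stripMargin

  RoutesFileParser.parseContent(routesText, new File("conf/routes")) match {
    case Right(rules) => println(s"parsed ${rules.size} rules")
    case Left(errors) => errors.foreach(println)
  }
}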
/* * Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com> */ package play.api.inject import java.util.concurrent.Executor import javax.inject.{ Inject, Provider, Singleton } import akka.actor.{ ActorSystem, CoordinatedShutdown } import akka.stream.Materializer import com.typesafe.config.Config import play.api._ import play.api.http.HttpConfiguration._ import play.api.http._ import play.api.libs.Files.TemporaryFileReaperConfigurationProvider import play.api.libs.Files._ import play.api.libs.concurrent._ import play.api.mvc._ import play.api.mvc.request.{ DefaultRequestFactory, RequestFactory } import play.api.routing.Router import play.core.j.JavaRouterAdapter import play.core.routing.GeneratedRouter import play.libs.concurrent.HttpExecutionContext import scala.concurrent.{ ExecutionContext, ExecutionContextExecutor } /** * The Play BuiltinModule. * * Provides all the core components of a Play application. This is typically automatically enabled by Play for an * application. */ class BuiltinModule extends SimpleModule((env, conf) => { def dynamicBindings(factories: ((Environment, Configuration) => Seq[Binding[_]])*) = { factories.flatMap(_ (env, conf)) } Seq( bind[Environment] to env, bind[ConfigurationProvider].to(new ConfigurationProvider(conf)), bind[Configuration].toProvider[ConfigurationProvider], bind[Config].toProvider[ConfigProvider], bind[HttpConfiguration].toProvider[HttpConfigurationProvider], bind[ParserConfiguration].toProvider[ParserConfigurationProvider], bind[CookiesConfiguration].toProvider[CookiesConfigurationProvider], bind[FlashConfiguration].toProvider[FlashConfigurationProvider], bind[SessionConfiguration].toProvider[SessionConfigurationProvider], bind[ActionCompositionConfiguration].toProvider[ActionCompositionConfigurationProvider], bind[FileMimeTypesConfiguration].toProvider[FileMimeTypesConfigurationProvider], bind[SecretConfiguration].toProvider[SecretConfigurationProvider], bind[TemporaryFileReaperConfiguration].toProvider[TemporaryFileReaperConfigurationProvider], bind[CookieHeaderEncoding].to[DefaultCookieHeaderEncoding], bind[RequestFactory].to[DefaultRequestFactory], bind[TemporaryFileReaper].to[DefaultTemporaryFileReaper], bind[TemporaryFileCreator].to[DefaultTemporaryFileCreator], bind[PlayBodyParsers].to[DefaultPlayBodyParsers], bind[BodyParsers.Default].toSelf, bind[DefaultActionBuilder].to[DefaultActionBuilderImpl], bind[ControllerComponents].to[DefaultControllerComponents], bind[MessagesActionBuilder].to[DefaultMessagesActionBuilderImpl], bind[MessagesControllerComponents].to[DefaultMessagesControllerComponents], bind[Futures].to[DefaultFutures], // Application lifecycle, bound both to the interface, and its implementation, so that Application can access it // to shut it down. 
bind[DefaultApplicationLifecycle].toSelf, bind[ApplicationLifecycle].to(bind[DefaultApplicationLifecycle]), bind[Application].to[DefaultApplication], bind[play.Application].to[play.DefaultApplication], bind[play.routing.Router].to[JavaRouterAdapter], bind[ActorSystem].toProvider[ActorSystemProvider], bind[Materializer].toProvider[MaterializerProvider], bind[CoordinatedShutdown].toProvider[CoordinatedShutdownProvider], bind[ExecutionContextExecutor].toProvider[ExecutionContextProvider], bind[ExecutionContext].to(bind[ExecutionContextExecutor]), bind[Executor].to(bind[ExecutionContextExecutor]), bind[HttpExecutionContext].toSelf, bind[play.core.j.JavaContextComponents].to[play.core.j.DefaultJavaContextComponents], bind[play.core.j.JavaHandlerComponents].to[play.core.j.DefaultJavaHandlerComponents], bind[FileMimeTypes].toProvider[DefaultFileMimeTypesProvider] ) ++ dynamicBindings( HttpErrorHandler.bindingsFromConfiguration, HttpFilters.bindingsFromConfiguration, HttpRequestHandler.bindingsFromConfiguration, ActionCreator.bindingsFromConfiguration, RoutesProvider.bindingsFromConfiguration ) }) // This allows us to access the original configuration via this // provider while overriding the binding for Configuration itself. class ConfigurationProvider(val get: Configuration) extends Provider[Configuration] class ConfigProvider @Inject() (configuration: Configuration) extends Provider[Config] { override def get() = configuration.underlying } @Singleton class RoutesProvider @Inject() (injector: Injector, environment: Environment, configuration: Configuration, httpConfig: HttpConfiguration) extends Provider[Router] { lazy val get = { val prefix = httpConfig.context val router = Router.load(environment, configuration) .fold[Router](Router.empty)(injector.instanceOf(_)) router.withPrefix(prefix) } } object RoutesProvider { def bindingsFromConfiguration(environment: Environment, configuration: Configuration): Seq[Binding[_]] = { val routerClass = Router.load(environment, configuration) // If it's a generated router, then we need to provide a binding for it. Otherwise, it's the users // (or the library that provided the router) job to provide a binding for it. val routerInstanceBinding = routerClass match { case Some(generated) if classOf[GeneratedRouter].isAssignableFrom(generated) => Seq(bind(generated).toSelf) case _ => Nil } routerInstanceBinding :+ bind[Router].toProvider[RoutesProvider] } }
Shenker93/playframework
framework/src/play/src/main/scala/play/api/inject/BuiltinModule.scala
Scala
apache-2.0
5,511
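An application-defined module can follow the same SimpleModule pattern used by BuiltinModule above. A minimal sketch with invented types (Greeter/EnglishGreeter); such a module would then be enabled through play.modules.enabled in application.conf.

import play.api.{Configuration, Environment}
import play.api.inject.{SimpleModule, bind}

trait Greeter { def greet(name: String): String }
class EnglishGreeter extends Greeter { def greet(name: String): String = s"Hello, $name" }

// Same (Environment, Configuration) => Seq[Binding[_]] constructor that BuiltinModule uses.
class GreeterModule extends SimpleModule(
  (_: Environment, _: Configuration) => Seq(bind[Greeter].to[EnglishGreeter])
)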
/** * Copyright (C) 2015 Stratio (http://stratio.com) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.stratio.datasource.mongodb.writer import com.mongodb.casbah.Imports._ import com.stratio.datasource.util.Config /** * A simple mongodb writer. * * @param config Configuration parameters (host,database,collection,...) */ private[mongodb] class MongodbSimpleWriter(config: Config) extends MongodbWriter(config) { override def save(it: Iterator[DBObject], mongoClient: MongoClient): Unit = it.foreach(dbo => dbCollection(mongoClient).save(dbo, writeConcern)) }
darroyocazorla/spark-mongodb
spark-mongodb/src/main/scala/com/stratio/datasource/mongodb/writer/MongodbSimpleWriter.scala
Scala
apache-2.0
1,105
/* * Copyright 2016 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package uk.gov.hmrc.ct.ct600a.v2 import uk.gov.hmrc.ct.box.{Calculated, CtBoxIdentifier, CtOptionalInteger} import uk.gov.hmrc.ct.ct600.v2.calculations.LoansToParticipatorsCalculator import uk.gov.hmrc.ct.ct600a.v2.retriever.CT600ABoxRetriever case class A8(value: Option[Int]) extends CtBoxIdentifier(name = "A8 - Information about loans made during the return period which have been repaid more than nine months after the end of the period and relief is due now") with CtOptionalInteger object A8 extends Calculated[A8, CT600ABoxRetriever] with LoansToParticipatorsCalculator { override def calculate(fieldValueRetriever: CT600ABoxRetriever): A8 = { import fieldValueRetriever._ calculateA8(retrieveCP2(), retrieveLP02(), retrieveLPQ07()) } }
ahudspith-equalexperts/ct-calculations
src/main/scala/uk/gov/hmrc/ct/ct600a/v2/A8.scala
Scala
apache-2.0
1,367
package io.iohk.ethereum.consensus.validators.std import akka.util.ByteString import io.iohk.ethereum.consensus.ethash.blocks.OmmersSeqEnc import io.iohk.ethereum.consensus.validators.BlockValidator import io.iohk.ethereum.crypto._ import io.iohk.ethereum.domain.{Block, BlockBody, BlockHeader, Receipt, SignedTransaction} import io.iohk.ethereum.ledger.BloomFilter import io.iohk.ethereum.utils.ByteUtils.or object StdBlockValidator extends BlockValidator { /** * Validates [[io.iohk.ethereum.domain.BlockHeader.transactionsRoot]] matches [[BlockBody.transactionList]] * based on validations stated in section 4.4.2 of http://paper.gavwood.com/ * * @param block Block to validate * @return BlockValid if the transactions root matches, a BlockError otherwise */ private def validateTransactionRoot(block: Block): Either[BlockError, BlockValid] = { val isValid = MptListValidator.isValid[SignedTransaction]( block.header.transactionsRoot.toArray[Byte], block.body.transactionList, SignedTransaction.byteArraySerializable ) if (isValid) Right(BlockValid) else Left(BlockTransactionsHashError) } /** * Validates [[BlockBody.uncleNodesList]] against [[io.iohk.ethereum.domain.BlockHeader.ommersHash]] * based on validations stated in section 4.4.2 of http://paper.gavwood.com/ * * @param block Block to validate * @return BlockValid if the ommers hash matches, a BlockError otherwise */ private def validateOmmersHash(block: Block): Either[BlockError, BlockValid] = { val encodedOmmers: Array[Byte] = block.body.uncleNodesList.toBytes if (kec256(encodedOmmers) sameElements block.header.ommersHash) Right(BlockValid) else Left(BlockOmmersHashError) } /** * Validates [[Receipt]] against [[io.iohk.ethereum.domain.BlockHeader.receiptsRoot]] * based on validations stated in section 4.4.2 of http://paper.gavwood.com/ * * @param blockHeader Block header to validate * @param receipts Receipts to use * @return BlockValid if the receipts root matches, a BlockError otherwise */ private def validateReceipts(blockHeader: BlockHeader, receipts: Seq[Receipt]): Either[BlockError, BlockValid] = { val isValid = MptListValidator.isValid[Receipt](blockHeader.receiptsRoot.toArray[Byte], receipts, Receipt.byteArraySerializable) if (isValid) Right(BlockValid) else Left(BlockReceiptsHashError) } /** * Validates [[io.iohk.ethereum.domain.BlockHeader.logsBloom]] against [[Receipt.logsBloomFilter]] * based on validations stated in section 4.4.2 of http://paper.gavwood.com/ * * @param blockHeader Block header to validate * @param receipts Receipts to use * @return BlockValid if the logs bloom filter matches, a BlockError otherwise */ private def validateLogBloom(blockHeader: BlockHeader, receipts: Seq[Receipt]): Either[BlockError, BlockValid] = { val logsBloomOr = if (receipts.isEmpty) BloomFilter.EmptyBloomFilter else ByteString(or(receipts.map(_.logsBloomFilter.toArray): _*)) if (logsBloomOr == blockHeader.logsBloom) Right(BlockValid) else Left(BlockLogBloomError) } /** * Validates that the block body does not contain transactions * * @param blockBody BlockBody to validate * @return BlockValid if there are no transactions, error otherwise */ private def validateNoTransactions(blockBody: BlockBody): Either[BlockError, BlockValid] = { Either.cond(blockBody.transactionList.isEmpty, BlockValid, CheckpointBlockTransactionsNotEmptyError) } /** * Validates that the block body does not contain ommers * * @param blockBody BlockBody to validate * @return BlockValid if there are no ommers, error otherwise */ private def validateNoOmmers(blockBody: BlockBody): Either[BlockError, BlockValid] = { Either.cond(blockBody.uncleNodesList.isEmpty, BlockValid, CheckpointBlockOmmersNotEmptyError) } /** * Validates a block that carries a checkpoint.
It performs the following validations: * - no transactions in the body * - no ommers in the body * * @param blockBody BlockBody to validate * @return The BlockValid if validations are ok, BlockError otherwise */ private def validateBlockWithCheckpoint(blockBody: BlockBody): Either[BlockError, BlockValid] = { for { _ <- validateNoTransactions(blockBody) _ <- validateNoOmmers(blockBody) } yield BlockValid } /** * Validates a regular Block. It only performs the following validations (stated on * section 4.4.2 of http://paper.gavwood.com/): * - BlockValidator.validateTransactionRoot * - BlockValidator.validateOmmersHash * * @param block Block to validate * @return The BlockValid if validations are ok, BlockError otherwise */ private def validateRegularBlock(block: Block): Either[BlockError, BlockValid] = { for { _ <- validateTransactionRoot(block) _ <- validateOmmersHash(block) } yield BlockValid } /** * Validates a Block. It only performs the following validations (stated on * section 4.4.2 of http://paper.gavwood.com/): * - validate regular block or block with checkpoint * - BlockValidator.validateReceipts * - BlockValidator.validateLogBloom * * @param block Block to validate * @param receipts Receipts to be used in the validation * @return The block if validations are ok, error otherwise */ def validate(block: Block, receipts: Seq[Receipt]): Either[BlockError, BlockValid] = { for { _ <- validateHeaderAndBody(block.header, block.body) _ <- validateBlockAndReceipts(block.header, receipts) } yield BlockValid } /** * Validates that a BlockHeader matches a BlockBody. * * @param blockHeader to validate * @param blockBody to validate * @return The block if the header matched the body, error otherwise */ def validateHeaderAndBody(blockHeader: BlockHeader, blockBody: BlockBody): Either[BlockError, BlockValid] = { val block = Block(blockHeader, blockBody) if (blockHeader.hasCheckpoint) validateBlockWithCheckpoint(blockBody) else validateRegularBlock(block) } /** * Validates the block against its associated receipts. * It only performs the following validations (stated on section 4.4.2 of http://paper.gavwood.com/): * - BlockValidator.validateReceipts * - BlockValidator.validateLogBloom * * @param blockHeader Block header to validate * @param receipts Receipts to be used in the validation * @return The block if validations are ok, error otherwise */ def validateBlockAndReceipts(blockHeader: BlockHeader, receipts: Seq[Receipt]): Either[BlockError, BlockValid] = { for { _ <- validateReceipts(blockHeader, receipts) _ <- validateLogBloom(blockHeader, receipts) } yield BlockValid } sealed trait BlockError case object BlockTransactionsHashError extends BlockError case object BlockOmmersHashError extends BlockError case object BlockReceiptsHashError extends BlockError case object BlockLogBloomError extends BlockError case object CheckpointBlockTransactionsNotEmptyError extends BlockError case object CheckpointBlockOmmersNotEmptyError extends BlockError sealed trait BlockValid case object BlockValid extends BlockValid }
input-output-hk/etc-client
src/main/scala/io/iohk/ethereum/consensus/validators/std/StdBlockValidator.scala
Scala
mit
7,308
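A hedged sketch of consuming the validator above; the Block and Receipt values are assumed to be produced elsewhere (for example during block import).

import io.iohk.ethereum.consensus.validators.std.StdBlockValidator
import io.iohk.ethereum.consensus.validators.std.StdBlockValidator.BlockValid
import io.iohk.ethereum.domain.{Block, Receipt}

object BlockCheckSketch {
  // Returns true when the validator accepts the block together with its receipts.
  def isInternallyConsistent(block: Block, receipts: Seq[Receipt]): Boolean =
    StdBlockValidator.validate(block, receipts) match {
      case Right(BlockValid) => true
      case Left(error) =>
        println(s"block rejected: $error")
        false
    }
}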
package org.deepdive.inference import org.deepdive.settings.FactorFunctionVariable import org.deepdive.calibration._ import org.deepdive.settings._ import java.io.File trait InferenceDataStore { /* Initializes the data store. This method must be called before any other methods in this class. */ def init() : Unit /* * The number of tuples in each batch. If not defined, we use one large batch. * The user can overwrite this number using the inference.batch_size config setting. */ def BatchSize : Option[Int] /* Generate a grounded graph based on the factor description */ def groundFactorGraph(schema: Map[String, _ <: VariableDataType], factorDescs: Seq[FactorDesc], calibrationSettings: CalibrationSettings, skipLearning: Boolean, weightTable: String, dbSettings: DbSettings = null, parallelGrounding: Boolean) : Unit /* * Dumps the factor graph with the given serializer */ def dumpFactorGraph(serializer: Serializer, schema: Map[String, _ <: VariableDataType], factorDescs: Seq[FactorDesc], holdoutFraction: Double, holdoutQuery: Option[String], weightsPath: String, variablesPath: String, factorsPath: String, edgesPath: String, parallelGrounding: Boolean) : Unit /* * Writes inference results produced by the sampler back to the data store. * The given file is a space-separated file with three columns: * VariableID, LastSampleValue, ExpectedValue */ def writebackInferenceResult(variableSchema: Map[String, _ <: VariableDataType], //variableOutputFile: String, weightsOutputFile: String, parallelGrounding: Boolean) : Unit variableOutputFile: String, weightsOutputFile: String, parallelGrounding: Boolean, dbSettings: DbSettings) : Unit /* * Gets calibration data for the given buckets. * writebackInferenceResult must be called before this method can be called. */ def getCalibrationData(variable: String, dataType: VariableDataType, buckets: List[Bucket]) : Map[Bucket, BucketData] } /* Stores the factor graph and inference results. */ trait InferenceDataStoreComponent { def inferenceDataStore : InferenceDataStore }
gaapt/deepdive
src/main/scala/org/deepdive/inference/datastore/InferenceDataStore.scala
Scala
apache-2.0
2,219
package com.cloudera.ds.svdbench import org.apache.mahout.math.VectorWritable import org.scalatest.ShouldMatchers class TestGenerateMatrix extends SparkTestUtils with ShouldMatchers { def countNonZero(vector: VectorWritable) = { vector.get().getNumNonZeroElements } sparkTest("Test Generate Matrix. ") { val nRows = 300 val nCols = 200 val sparsity = 0.1 val matrix = GenerateMatrix.generateSparseMatrix(nRows, nCols, sparsity, 10, sc) matrix.count() shouldEqual 300L val nNonZero = matrix.values.map(vec => vec.get.getNumNonZeroElements).sum() println( nNonZero/(nRows * nCols)) math.abs(nNonZero/(nRows * nCols) - sparsity) should be < 0.05 } }
jhlch/svd-benchmark
src/test/scala/com/cloudera/ds/svdbench/TestGenerateMatrix.scala
Scala
apache-2.0
695
package kornell.server import java.io.{File, FileInputStream, InputStream} import java.util.{HashMap, UUID} import kornell.server.jdbc.repository.{CourseClassRepo, CourseRepo} import kornell.server.report.ReportCourseClassGenerator.getClass import kornell.server.util.Settings import net.sf.jasperreports.engine._ import net.sf.jasperreports.engine.data.JRBeanCollectionDataSource import net.sf.jasperreports.engine.export.JRXlsExporter import net.sf.jasperreports.engine.util.JRLoader import scala.collection.JavaConverters.seqAsJavaListConverter package object report { def getReportBytesFromStream(certificateData: List[Any], parameters: HashMap[String, Object], jasperStream: InputStream, fileType: String): Array[Byte] = runReportToPdf(certificateData, parameters, JRLoader.loadObject(jasperStream).asInstanceOf[JasperReport], fileType) def getReportBytesFromJrxml(certificateData: List[Any], parameters: HashMap[String, Object], jrxmlFileName: String, fileType: String): Array[Byte] = { val inputStreamJR = getClass.getResourceAsStream("/reports/" + jrxmlFileName + ".jrxml") val reportCompiled = JasperCompileManager.compileReport(inputStreamJR) runReportToPdf(certificateData, parameters, reportCompiled, fileType) } def runReportToPdf(certificateData: List[Any], parameters: HashMap[String, Object], jasperReport: JasperReport, fileType: String): Array[Byte] = if (fileType != null && fileType == "xls") { val fileName = Settings.TMP_DIR + "tmp-" + UUID.randomUUID.toString + ".xls" val exporterXLS = new JRXlsExporter() val jasperPrint = JasperFillManager.fillReport(jasperReport, parameters, new JRBeanCollectionDataSource(certificateData asJava)) exporterXLS.setParameter(JRExporterParameter.JASPER_PRINT, jasperPrint) exporterXLS.setParameter(JRExporterParameter.OUTPUT_FILE_NAME, fileName) exporterXLS.exportReport() val source = scala.io.Source.fromFile(fileName)(scala.io.Codec.ISO8859) val byteArray = source.map(_.toByte).toArray source.close() byteArray } else JasperRunManager.runReportToPdf(jasperReport, parameters, new JRBeanCollectionDataSource(certificateData asJava)) def clearJasperFiles: String = { val folder = new File(Settings.TMP_DIR) var deleted = "Deleting files from folder " + folder.getAbsolutePath + ": " folder.listFiles().foreach(file => if (file.getName.endsWith(".jasper") || file.getName.endsWith(".xls") || file.getName.endsWith(".pdf")) { file.delete() deleted += file.getName + ", " }) deleted } def getFileType(fileType: String): String = { if (fileType == "xls") "xls" else "pdf" } def getContentType(fileType: String): String = { if (getFileType(fileType) == "xls") "application/vnd.ms-excel" else "application/pdf" } def getInstitutionUUID(courseClassUUID: String, courseUUID: String = null): String = { if (courseUUID != null) { CourseRepo(courseUUID).get.getInstitutionUUID } else { CourseClassRepo(courseClassUUID).get.getInstitutionUUID } } }
Craftware/Kornell
kornell-api/src/main/scala/kornell/server/report/package.scala
Scala
apache-2.0
3,130
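A hedged usage sketch of the helpers above; the report name "certificate" and the parameter key are placeholders, so this only renders if a matching jrxml exists under /reports.

import java.util.HashMap
import kornell.server.report._

object ReportSketch extends App {
  val params = new HashMap[String, Object]()
  params.put("courseClassUUID", "11111111-2222-3333-4444-555555555555")

  // fileType "pdf" routes through JasperRunManager.runReportToPdf inside runReportToPdf above.
  val bytes: Array[Byte] = getReportBytesFromJrxml(List.empty[Any], params, "certificate", getFileType("pdf"))
  println(s"generated ${bytes.length} bytes with content type ${getContentType("pdf")}")
}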
/* * Copyright 2016-2020 47 Degrees Open Source <https://www.47deg.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package github4s.integration import cats.data.NonEmptyList import cats.effect.{IO, Resource} import cats.implicits._ import github4s.GHError.{NotFoundError, UnauthorizedError} import github4s.domain._ import github4s.utils.{BaseIntegrationSpec, Integration} import github4s.{GHResponse, Github} trait ReposSpec extends BaseIntegrationSpec { "Repos >> Get" should "return the expected name when a valid repo is provided" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .get(validRepoOwner, validRepoName, headers = headerUserAgent) } .unsafeRunSync() testIsRight[Repository](response, r => r.name shouldBe validRepoName) response.statusCode shouldBe okStatusCode } it should "return error when an invalid repo name is passed" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .get(validRepoOwner, invalidRepoName, headers = headerUserAgent) } .unsafeRunSync() testIsLeft[NotFoundError, Repository](response) response.statusCode shouldBe notFoundStatusCode } "Repos >> ListReleases" should "return the expected repos when a valid org is provided" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .listReleases(validRepoOwner, validRepoName, None, headers = headerUserAgent) } .unsafeRunSync() testIsRight[List[Release]](response, r => r.nonEmpty shouldBe true) response.statusCode shouldBe okStatusCode } "Repos >> getRelease" should "return the expected repos when a valid org is provided" taggedAs Integration in { val responseResource = for { client <- clientResource gh: Github[IO] = Github[IO](client, accessToken) releasesIO = gh.repos.listReleases(validRepoOwner, validRepoName, None, headers = headerUserAgent) releasesResponse <- Resource.liftF(releasesIO) releases <- Resource.liftF(IO.fromEither(releasesResponse.result)) releasesAreFoundCheck: IO[List[(Release, GHResponse[Option[Release]])]] = releases.map { release => val releaseIO = gh.repos .getRelease(release.id, validRepoOwner, validRepoName, headers = headerUserAgent) releaseIO.map(r => release -> r) }.sequence } yield releasesAreFoundCheck val responseList: List[(Release, GHResponse[Option[Release]])] = responseResource .use(identity) .unsafeRunSync() forAll(responseList) { case (release, response) => testIsRight[Option[Release]](response, r => r should contain(release)) response.statusCode shouldBe okStatusCode } } "Repos >> LatestRelease" should "return the expected repos when a valid org is provided" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .latestRelease(validRepoOwner, validRepoName, headers = headerUserAgent) } .unsafeRunSync() testIsRight[Option[Release]](response, r => r.nonEmpty shouldBe true) response.statusCode shouldBe okStatusCode } "Repos >> ListOrgRepos" should "return the expected 
repos when a valid org is provided" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .listOrgRepos(validRepoOwner, headers = headerUserAgent) } .unsafeRunSync() testIsRight[List[Repository]](response, r => r.nonEmpty shouldBe true) response.statusCode shouldBe okStatusCode } it should "return error when an invalid org is passed" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .listOrgRepos(invalidRepoName, headers = headerUserAgent) } .unsafeRunSync() testIsLeft[NotFoundError, List[Repository]](response) response.statusCode shouldBe notFoundStatusCode } "Repos >> ListUserRepos" should "return the expected repos when a valid user is provided" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .listUserRepos(validUsername, headers = headerUserAgent) } .unsafeRunSync() testIsRight[List[Repository]](response, r => r.nonEmpty shouldBe true) response.statusCode shouldBe okStatusCode } it should "return error when an invalid user is passed" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .listUserRepos(invalidUsername, headers = headerUserAgent) } .unsafeRunSync() testIsLeft[NotFoundError, List[Repository]](response) response.statusCode shouldBe notFoundStatusCode } "Repos >> GetContents" should "return the expected contents when valid path is provided" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .getContents(validRepoOwner, validRepoName, validFilePath, headers = headerUserAgent) } .unsafeRunSync() testIsRight[NonEmptyList[Content]](response, r => r.head.path shouldBe validFilePath) response.statusCode shouldBe okStatusCode } "Repos >> GetContents" should "have the same contents with getBlob using fileSha" taggedAs Integration in { val blobResponseFileContent = for { client <- clientResource res = Github[IO](client, accessToken) fileContentsIO = res.repos.getContents( owner = validRepoOwner, repo = validRepoName, path = validFilePath, headers = headerUserAgent ) fileContentsResponse <- Resource.liftF(fileContentsIO) fileContentsEither = fileContentsResponse.result fileContents <- Resource.liftF(IO.fromEither(fileContentsEither)) blobContentIO = res.gitData.getBlob( owner = validRepoOwner, repo = validRepoName, fileSha = fileContents.head.sha, headers = headerUserAgent ) blobContentResponse <- Resource.liftF(blobContentIO) } yield (blobContentResponse, fileContents.head) val (blobContentResponse, fileContent) = blobResponseFileContent .use(a => IO.apply(a)) .unsafeRunSync() testIsRight[BlobContent](blobContentResponse, _.content.shouldBe(fileContent.content)) blobContentResponse.statusCode shouldBe okStatusCode } it should "return error when an invalid path is passed" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .getContents(validRepoOwner, validRepoName, invalidFilePath, headers = headerUserAgent) } .unsafeRunSync() testIsLeft[NotFoundError, NonEmptyList[Content]](response) response.statusCode shouldBe notFoundStatusCode } "Repos >> ListCommits" should "return the expected list of commits for valid data" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .listCommits(validRepoOwner, validRepoName, headers = headerUserAgent) } .unsafeRunSync() 
testIsRight[List[Commit]](response, r => r.nonEmpty shouldBe true) response.statusCode shouldBe okStatusCode } it should "return error for invalid repo name" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .listCommits(invalidRepoName, validRepoName, headers = headerUserAgent) } .unsafeRunSync() testIsLeft[NotFoundError, List[Commit]](response) response.statusCode shouldBe notFoundStatusCode } "Repos >> ListBranches" should "return the expected list of branches for valid data" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .listBranches(validRepoOwner, validRepoName, headers = headerUserAgent) } .unsafeRunSync() testIsRight[List[Branch]](response, r => r.nonEmpty shouldBe true) response.statusCode shouldBe okStatusCode } it should "return error for invalid repo name" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .listBranches(invalidRepoName, validRepoName, headers = headerUserAgent) } .unsafeRunSync() testIsLeft[NotFoundError, List[Branch]](response) response.statusCode shouldBe notFoundStatusCode } "Repos >> ListContributors" should "return the expected list of contributors for valid data" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .listContributors(validRepoOwner, validRepoName, headers = headerUserAgent) } .unsafeRunSync() testIsRight[List[User]](response, r => r shouldNot be(empty)) response.statusCode shouldBe okStatusCode } it should "return error for invalid repo name" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .listContributors(invalidRepoName, validRepoName, headers = headerUserAgent) } .unsafeRunSync() testIsLeft[NotFoundError, List[User]](response) response.statusCode shouldBe notFoundStatusCode } "Repos >> ListCollaborators" should "return the expected list of collaborators for valid data" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .listCollaborators(validRepoOwner, validRepoName, headers = headerUserAgent) } .unsafeRunSync() testIsRight[List[User]](response, r => r shouldNot be(empty)) response.statusCode shouldBe okStatusCode } it should "return error for invalid repo name" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .listCollaborators(invalidRepoName, validRepoName, headers = headerUserAgent) } .unsafeRunSync() testIsLeft[NotFoundError, List[User]](response) response.statusCode shouldBe notFoundStatusCode } "Repos >> UserIsCollaborator" should "return true when the user is a collaborator" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .userIsCollaborator( validRepoOwner, validRepoName, validUsername, headers = headerUserAgent ) } .unsafeRunSync() testIsRight[Boolean](response, r => r should be(true)) response.statusCode shouldBe noContentStatusCode } it should "return false when the user is not a collaborator" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .userIsCollaborator( validRepoOwner, validRepoName, invalidUsername, headers = headerUserAgent ) } .unsafeRunSync() testIsRight[Boolean](response, r => r should be(false)) response.statusCode shouldBe notFoundStatusCode } it should "return 
error when other errors occur" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, "invalid-access-token".some).repos .userIsCollaborator( validRepoOwner, validRepoName, validUsername, headers = headerUserAgent ) } .unsafeRunSync() testIsLeft[UnauthorizedError, Boolean](response) response.statusCode shouldBe unauthorizedStatusCode } "Repos >> GetRepoPermissionForUser" should "return user repo permission" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .getRepoPermissionForUser( validRepoOwner, validRepoName, validUsername, headers = headerUserAgent ) } .unsafeRunSync() testIsRight[UserRepoPermission](response, r => r.user.login shouldBe validUsername) response.statusCode shouldBe okStatusCode } it should "return error when invalid username is passed" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .getRepoPermissionForUser( validRepoOwner, validRepoName, invalidUsername, headers = headerUserAgent ) } .unsafeRunSync() testIsLeft[NotFoundError, UserRepoPermission](response) response.statusCode shouldBe notFoundStatusCode } "Repos >> GetStatus" should "return a combined status" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .getCombinedStatus( validRepoOwner, validRepoName, validRefSingle, headers = headerUserAgent ) } .unsafeRunSync() testIsRight[CombinedStatus]( response, r => r.repository.full_name shouldBe s"$validRepoOwner/$validRepoName" ) response.statusCode shouldBe okStatusCode } it should "return an error when an invalid ref is passed" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .getCombinedStatus(validRepoOwner, validRepoName, invalidRef, headers = headerUserAgent) } .unsafeRunSync() testIsLeft[NotFoundError, CombinedStatus](response) response.statusCode shouldBe notFoundStatusCode } "Repos >> ListStatus" should "return a non empty list when a valid ref is provided" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .listStatuses(validRepoOwner, validRepoName, validCommitSha, headers = headerUserAgent) } .unsafeRunSync() testIsRight[List[Status]](response, r => r.nonEmpty shouldBe true) response.statusCode shouldBe okStatusCode } it should "return an error when an invalid ref is provided" taggedAs Integration in { val response = clientResource .use { client => Github[IO](client, accessToken).repos .listStatuses(validRepoOwner, validRepoName, invalidRef, headers = headerUserAgent) } .unsafeRunSync() testIsLeft[NotFoundError, List[Status]](response) response.statusCode shouldBe notFoundStatusCode } }
47deg/github4s
github4s/src/test/scala/github4s/integration/ReposSpec.scala
Scala
apache-2.0
15,609
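Outside the test suite, the same client pattern looks roughly like the sketch below; the http4s client resource and token are assumed to be set up the way BaseIntegrationSpec sets them up, and the owner/repo strings are placeholders.

import cats.effect.{IO, Resource}
import org.http4s.client.Client
import github4s.Github

object LatestReleaseSketch {
  // True when the repository has a published "latest" release.
  def hasLatestRelease(clientResource: Resource[IO, Client[IO]], accessToken: Option[String]): IO[Boolean] =
    clientResource.use { client =>
      Github[IO](client, accessToken).repos
        .latestRelease("47degrees", "github4s", headers = Map.empty)
        .map(_.result.toOption.flatten.isDefined)
    }
}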
package com.weibo.datasys import akka.actor.{ ActorSystem, Props, _ } import akka.io.IO import akka.util.Timeout import com.weibo.datasys.rest.Configuration import spray.can.Http import scala.concurrent.duration._ /** * Created by tuoyu on 25/01/2017. */ object Main extends Configuration { def main(args: Array[String]): Unit = { lazy val cmd = new ArgumentConf(args) if (cmd.help()) { cmd.printHelp() sys.exit(0) } if (cmd.rest_service()) { startRestService() } else if (cmd.scheduler_service()) { startJobSchedulerService() } def startRestService() = { implicit val system = ActorSystem( cluster_name, config.getConfig("rest-service").withFallback(config) ) implicit val executionContext = system.dispatcher implicit val timeout = Timeout(10 seconds) val restService = system.actorOf(Props[RestServiceActor], RestServiceActor.Name) IO(Http) ! Http.Bind(restService, host, port) } def startJobSchedulerService() = { implicit val system = ActorSystem( cluster_name, config.getConfig("scheduler-service").withFallback(config) ) system.actorOf(JobSchedulerActor.props(), JobSchedulerActor.Name) } } }
batizty/wolong
src/main/scala/com/weibo/datasys/Main.scala
Scala
apache-2.0
1,268
/* * Part of GDL book_api. * Copyright (C) 2018 Global Digital Library * * See LICENSE */ package io.digitallibrary.bookapi.repository import java.sql.PreparedStatement import io.digitallibrary.language.model.LanguageTag import io.digitallibrary.bookapi.model.api.OptimisticLockException import io.digitallibrary.bookapi.model.domain.InTranslation import scalikejdbc._ trait InTranslationRepository { val inTranslationRepository: InTranslationRepository class InTranslationRepository { private val tr = InTranslation.syntax def add(inTranslation: InTranslation)(implicit session: DBSession = AutoSession): InTranslation = { import collection.JavaConverters._ val t = InTranslation.column val startRevision = 1 val userBinder = ParameterBinder( value = inTranslation.userIds, binder = (stmt: PreparedStatement, idx: Int) => stmt.setArray(idx, stmt.getConnection.createArrayOf("text", inTranslation.userIds.asJava.toArray)) ) val id = insert.into(InTranslation).namedValues( t.revision -> startRevision, t.userIds -> userBinder, t.originalTranslationId -> inTranslation.originalTranslationId, t.newTranslationId -> inTranslation.newTranslationId, t.fromLanguage -> inTranslation.fromLanguage.toString, t.toLanguage -> inTranslation.toLanguage.toString, t.crowdinToLanguage -> inTranslation.crowdinToLanguage, t.crowdinProjectId -> inTranslation.crowdinProjectId ).toSQL .updateAndReturnGeneratedKey() .apply() inTranslation.copy(id = Some(id), revision = Some(startRevision)) } def updateTranslation(toUpdate: InTranslation)(implicit session: DBSession = AutoSession): InTranslation = { import collection.JavaConverters._ val t = InTranslation.column val nextRevision = toUpdate.revision.getOrElse(0) + 1 val userBinder = ParameterBinder( value = toUpdate.userIds, binder = (stmt: PreparedStatement, idx: Int) => stmt.setArray(idx, stmt.getConnection.createArrayOf("text", toUpdate.userIds.asJava.toArray)) ) val count = update(InTranslation).set( t.revision -> nextRevision, t.userIds -> userBinder, t.originalTranslationId -> toUpdate.originalTranslationId, t.newTranslationId -> toUpdate.newTranslationId, t.fromLanguage -> toUpdate.fromLanguage.toString, t.toLanguage -> toUpdate.toLanguage.toString, t.crowdinToLanguage -> toUpdate.crowdinToLanguage, t.crowdinProjectId -> toUpdate.crowdinProjectId ).where .eq(t.id, toUpdate.id).and .eq(t.revision, toUpdate.revision) .toSQL.update().apply() if (count != 1) { throw new OptimisticLockException() } else { toUpdate.copy(revision = Some(nextRevision)) } } def withId(id: Long)(implicit session: DBSession = ReadOnlyAutoSession): Option[InTranslation] = { select .from(InTranslation as tr) .where.eq(tr.id, id) .toSQL .map(InTranslation(tr)) .single().apply() } def forOriginalIdWithToLanguage(originalTranslationId: Long, toLanguage: LanguageTag)(implicit session: DBSession = ReadOnlyAutoSession): Option[InTranslation] = { select .from(InTranslation as tr) .where.eq(tr.originalTranslationId, originalTranslationId) .and.eq(tr.toLanguage, toLanguage.toString) .toSQL .map(InTranslation(tr)) .single().apply() } def forOriginalId(originalTranslationId: Long)(implicit session: DBSession = ReadOnlyAutoSession): Seq[InTranslation] = { select .from(InTranslation as tr) .where.eq(tr.originalTranslationId, originalTranslationId) .toSQL .map(InTranslation(tr)) .list().apply() } def inTranslationForUser(userId: String)(implicit session: DBSession = ReadOnlyAutoSession): Seq[InTranslation] = { sql"select ${tr.result.*} from ${InTranslation.as(tr)} where $userId = 
ANY(user_ids)".map(InTranslation(tr)).list().apply() } } }
GlobalDigitalLibraryio/book-api
src/main/scala/io/digitallibrary/bookapi/repository/InTranslationRepository.scala
Scala
apache-2.0
4,135
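A hedged sketch of reading through the repository component above; the consumer simply mixes in the cake trait and relies on the default read-only session.

import io.digitallibrary.bookapi.repository.InTranslationRepository

trait TranslationLookup { this: InTranslationRepository =>
  def describeTranslation(id: Long): Option[String] =
    inTranslationRepository.withId(id).map { t =>
      s"from ${t.fromLanguage} to ${t.toLanguage} (original translation ${t.originalTranslationId})"
    }
}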
/** * Copyright (C) 2011 Orbeon, Inc. * * This program is free software; you can redistribute it and/or modify it under the terms of the * GNU Lesser General Public License as published by the Free Software Foundation; either version * 2.1 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU Lesser General Public License for more details. * * The full text of the license is available at http://www.gnu.org/copyleft/lesser.html */ package org.orbeon.oxf.xforms.xbl import org.dom4j._ import org.orbeon.oxf.common.{OXFException, Version} import org.orbeon.oxf.util.{IndentedLogger, Logging, Whitespace} import org.orbeon.oxf.xforms.XFormsConstants._ import org.orbeon.oxf.xforms._ import org.orbeon.oxf.xforms.analysis._ import org.orbeon.oxf.xml._ import org.orbeon.oxf.xml.dom4j.{Dom4jUtils, LocationData, LocationDocumentResult} import org.xml.sax.Attributes import scala.collection.mutable /** * All the information statically gathered about XBL bindings. * * TODO: * * - xbl:handler and models under xbl:implementation are copied for each binding. We should be able to do this better: * - do the "id" part of annotation only once * - therefore keep a single DOM for all uses of those * - however, if needed, still register namespace mappings by prefix once per mapping * - P2: even for templates that produce the same result per each instantiation: * - detect that situation (when is this possible?) * - keep a single DOM */ class XBLBindings( indentedLogger : IndentedLogger, partAnalysis : PartAnalysisImpl, metadata : Metadata, inlineXBL : Seq[Element] ) extends Logging { private implicit val Logger = indentedLogger // For unit test written in Java def this(indentedLogger: IndentedLogger, partAnalysis: PartAnalysisImpl, metadata: Metadata) = this(indentedLogger, partAnalysis, metadata: Metadata, Seq.empty) // We now know all inline XBL bindings, which we didn't in XFormsAnnotator. So // NOTE: Inline bindings are only extracted at the top level of a part. We could imagine extracting them within // all XBL components. They would then have to be properly scoped. if (partAnalysis ne null) // for unit test which passes null in! metadata.extractInlineXBL(inlineXBL, partAnalysis.startScope) private val logShadowTrees = false // whether to log shadow trees as they are built /* * Notes about id generation * * Two approaches: * * - use shared IdGenerator * - simpler * - drawback: automatic ids grow larger * - works for id allocation, but not for checking duplicate ids, but we do duplicate id check separately for XBL * anyway in ScopeExtractorContentHandler * - use separate outer/inner scope IdGenerator * - more complex * - requires to know inner/outer scope at annotation time * - requires XFormsAnnotator to provide start/end of XForms element * * As of 2009-09-14, we use an IdGenerator shared among top-level and all XBL bindings. 
*/ case class Global(templateTree: SAXStore, compactShadowTree: Document) val concreteBindings = mutable.HashMap[String, ConcreteBinding]() val abstractBindingsWithGlobals = mutable.ArrayBuffer[AbstractBinding]() val allGlobals = mutable.ArrayBuffer[Global]() // Create concrete binding if there is an applicable abstract binding def processElementIfNeeded( controlElement : Element, controlPrefixedId : String, locationData : LocationData, containerScope : Scope ): Option[ConcreteBinding] = metadata.findBindingByPrefixedId(controlPrefixedId) flatMap { abstractBinding ⇒ generateRawShadowTree(controlElement, abstractBinding) map { rawShadowTree ⇒ val newBinding = createConcreteBinding( controlElement, controlPrefixedId, locationData, containerScope, abstractBinding, rawShadowTree ) concreteBindings += controlPrefixedId → newBinding newBinding } } private def createConcreteBinding( boundElement : Element, boundControlPrefixedId : String, locationData : LocationData, containerScope : Scope, abstractBinding : AbstractBinding, rawShadowTree : Document ): ConcreteBinding = { // New prefix corresponds to bound element prefixed id //val newPrefix = boundControlPrefixedId + COMPONENT_SEPARATOR val newInnerScope = partAnalysis.newScope(containerScope, boundControlPrefixedId) // NOTE: Outer scope is not necessarily the container scope! val outerScope = partAnalysis.scopeForPrefixedId(boundControlPrefixedId) // Annotate control tree val (templateTree, compactShadowTree) = annotateAndExtractSubtree( Some(boundElement), rawShadowTree, newInnerScope, outerScope, XXBLScope.inner, newInnerScope, hasFullUpdate(rawShadowTree), ignoreRoot = true ) // Annotate event handlers and implementation models def annotateByElement(element: Element) = annotateSubtreeByElement(boundElement, element, newInnerScope, outerScope, XXBLScope.inner, newInnerScope) val annotatedHandlers = abstractBinding.handlers map annotateByElement val annotatedModels = abstractBinding.modelElements map annotateByElement // Remember concrete binding information val newConcreteBinding = ConcreteBinding( abstractBinding, newInnerScope, outerScope, annotatedHandlers, annotatedModels, templateTree, compactShadowTree ) // Process globals here as the component is in use processGlobalsIfNeeded(abstractBinding, locationData) // Extract xbl:xbl/xbl:script and xbl:binding/xbl:resources/xbl:style // TODO: should do this here, in order to include only the scripts and resources actually used newConcreteBinding } /** * From a raw non-control tree (handlers, models) rooted at an element, produce a full annotated tree. 
*/ def annotateSubtreeByElement( boundElement : Element, element : Element, innerScope : Scope, outerScope : Scope, startScope : XXBLScope, containerScope : Scope ): Element = annotateSubtree1( Some(boundElement), Dom4jUtils.createDocumentCopyParentNamespaces(element, false), innerScope, outerScope, startScope, containerScope, hasFullUpdate = false, ignoreRoot = false ).getRootElement // Annotate a tree def annotateSubtree1( boundElement : Option[Element], // for xml:base resolution rawTree : Node, innerScope : Scope, outerScope : Scope, startScope : XXBLScope, containerScope : Scope, hasFullUpdate : Boolean, ignoreRoot : Boolean ): Document = withDebug("annotating tree") { val baseURI = XFormsUtils.resolveXMLBase(boundElement.orNull, null, ".").toString val fullAnnotatedTree = annotateShadowTree(rawTree, containerScope.fullPrefix) TransformerUtils.writeDom4j( fullAnnotatedTree, new ScopeExtractor(null, innerScope, outerScope, startScope, containerScope.fullPrefix, baseURI) ) fullAnnotatedTree } // Annotate a subtree and return a template and compact tree def annotateAndExtractSubtree( boundElement : Option[Element], // for xml:base resolution rawTree : Node, innerScope : Scope, outerScope : Scope, startScope : XXBLScope, containerScope : Scope, hasFullUpdate : Boolean, ignoreRoot : Boolean ): (SAXStore, Document) = withDebug("annotating and extracting tree") { val baseURI = XFormsUtils.resolveXMLBase(boundElement.orNull, null, ".").toString val (templateTree, compactTree) = { val templateOutput = new SAXStore val extractorOutput = TransformerUtils.getIdentityTransformerHandler val extractorDocument = new LocationDocumentResult extractorOutput.setResult(extractorDocument) TransformerUtils.writeDom4j(rawTree, new WhitespaceXMLReceiver( new XFormsAnnotator( templateOutput, new ScopeExtractor( new WhitespaceXMLReceiver( extractorOutput, Whitespace.defaultBasePolicy, Whitespace.basePolicyMatcher ), innerScope, outerScope, startScope, containerScope.fullPrefix, baseURI ), metadata, false ) { // Use prefixed id for marks and namespaces in order to avoid clashes between top-level // controls and shadow trees protected override def rewriteId(id: String) = containerScope.fullPrefix + id }, Whitespace.defaultHTMLPolicy, Whitespace.htmlPolicyMatcher ) ) (templateOutput, extractorDocument.getDocument) } if (logShadowTrees) debugResults(Seq( "full tree" → Dom4jUtils.domToString(TransformerUtils.saxStoreToDom4jDocument(templateTree)), "compact tree" → Dom4jUtils.domToString(compactTree) )) // Result is full annotated tree and, if needed, the compact tree (templateTree, compactTree) } private def processGlobalsIfNeeded(abstractBinding: AbstractBinding, locationData: LocationData): Unit = if (partAnalysis.isTopLevel) // see also "Issues with xxbl:global" in PartAnalysisImpl abstractBinding.global match { case Some(globalDocument) if ! 
abstractBindingsWithGlobals.exists(abstractBinding eq) ⇒ val (globalTemplateTree, globalCompactShadowTree) = withDebug("generating global XBL shadow content", Seq("binding id" → abstractBinding.bindingId.orNull)) { val topLevelScopeForGlobals = partAnalysis.startScope // TODO: in script mode, XHTML elements in template should only be kept during page generation annotateAndExtractSubtree( boundElement = None, rawTree = globalDocument, innerScope = topLevelScopeForGlobals, outerScope = topLevelScopeForGlobals, startScope = XXBLScope.inner, containerScope = topLevelScopeForGlobals, hasFullUpdate = hasFullUpdate(globalDocument), ignoreRoot = true ) } abstractBindingsWithGlobals += abstractBinding allGlobals += Global(globalTemplateTree, globalCompactShadowTree) case _ ⇒ // no global to process } // Generate raw (non-annotated) shadow content for the given control id and XBL binding. private def generateRawShadowTree(boundElement: Element, abstractBinding: AbstractBinding): Option[Document] = abstractBinding.templateElement map { templateElement ⇒ withDebug("generating raw XBL shadow content", Seq("binding id" → abstractBinding.bindingId.orNull)) { // TODO: in script mode, XHTML elements in template should only be kept during page generation // Here we create a completely separate document // 1. Apply optional preprocessing step (usually XSLT) // If @xxbl:transform is not present, just use a copy of the template element itself val shadowTreeDocument = abstractBinding.newTransform(boundElement) getOrElse Dom4jUtils.createDocumentCopyParentNamespaces(templateElement) // 2. Apply xbl:attr, xbl:content, xxbl:attr and index xxbl:scope XBLTransformer.transform( shadowTreeDocument, boundElement, abstractBinding.modeHandlers, abstractBinding.modeLHHA, abstractBinding.supportAVTs ) } } private def hasFullUpdate(shadowTreeDocument: Document) = if (Version.isPE) { var hasUpdateFull = false Dom4jUtils.visitSubtree(shadowTreeDocument.getRootElement, new Dom4jUtils.VisitorListener { def startElement(element: Element): Unit = { val xxformsUpdate = element.attributeValue(XXFORMS_UPDATE_QNAME) if (XFORMS_FULL_UPDATE == xxformsUpdate) hasUpdateFull = true } def endElement(element: Element) = () def text(text: Text) = () }, true) hasUpdateFull } else false // Keep public for unit tests def annotateShadowTree(shadowTree: Node, prefix: String): Document = { // Create transformer val identity = TransformerUtils.getIdentityTransformerHandler // Set result val documentResult = new LocationDocumentResult identity.setResult(documentResult) // Put SAXStore in the middle if we have full updates val output = identity // Write the document through the annotator // TODO: this adds xml:base on root element, must fix TransformerUtils.writeDom4j(shadowTree, new XFormsAnnotator(output, null, metadata, false) { // Use prefixed id for marks and namespaces to avoid clashes between top-level controls and shadow trees protected override def rewriteId(id: String) = prefix + id }) // Return annotated document documentResult.getDocument } private class ScopeExtractor( xmlReceiver : XMLReceiver, // output of transformation or null innerScope : Scope, // inner scope outerScope : Scope, // outer scope, i.e. 
scope of the bound startScope : XXBLScope, // scope of root element prefix : String, // prefix of the ids within the new shadow tree baseURI : String) // base URI of new tree extends XFormsExtractor(xmlReceiver, metadata, null, baseURI, startScope, false, false, true) { assert(innerScope ne null) assert(outerScope ne null) override def getPrefixedId(staticId: String) = prefix + staticId override def indexElementWithScope( uri : String, localname : String, attributes : Attributes, currentScope : XXBLScope ): Unit = { // Index prefixed id ⇒ scope val staticId = attributes.getValue("id") // NOTE: We can be called on HTML elements within LHHA, which may or may not have an id (they must have one // if they have AVTs). if (staticId ne null) { val prefixedId = prefix + staticId if (metadata.getNamespaceMapping(prefixedId) ne null) { val scope = if (currentScope == XXBLScope.inner) innerScope else outerScope // Enforce constraint that mapping must be unique if (scope.contains(staticId)) throw new OXFException("Duplicate id found for static id: " + staticId) // Index scope partAnalysis.mapScopeIds(staticId, prefixedId, scope, ignoreIfPresent = false) // Index AVT `for` if needed if (uri == XXFORMS_NAMESPACE_URI && localname == "attribute") { val forStaticId = attributes.getValue("for") val forPrefixedId = prefix + forStaticId partAnalysis.mapScopeIds(forStaticId, forPrefixedId, scope, ignoreIfPresent = true) } } } } } // NOTE: We used to clear metadata, but since the enclosing PartAnalysisImpl keeps a reference to metadata, there is // no real point to it. def freeTransientState() = metadata.commitBindingIndex() // This function is not called as of 2011-06-28 but if/when we support removing scopes, check these notes: // - deindex prefixed ids ⇒ Scope // - remove models associated with scope // - remove control analysis // - deindex scope id ⇒ Scope //def removeScope(scope: Scope) = ??? def hasBinding(controlPrefixedId: String) = getBinding(controlPrefixedId).isDefined def getBinding(controlPrefixedId: String) = concreteBindings.get(controlPrefixedId) def removeBinding(controlPrefixedId: String): Unit = concreteBindings -= controlPrefixedId // NOTE: Can't update abstractBindings, allScripts, allStyles, allGlobals without checking all again, so for now // leave that untouched. }
ajw625/orbeon-forms
src/main/scala/org/orbeon/oxf/xforms/xbl/XBLBindings.scala
Scala
lgpl-2.1
18,606
import sbt._ import sbt.Keys._ object PurfuncorBuild extends Build { lazy val purfuncor = Project( id = "purfuncor", base = file("."), settings = Project.defaultSettings ++ Seq( name := "Purfuncor", organization := "pl.luckboy.purfuncor", version := "0.1.1", scalaVersion := "2.10.1", // add other settings here libraryDependencies += "org.scalaz" % "scalaz-core_2.10" % "7.0.0", libraryDependencies += "org.scalatest" % "scalatest_2.10" % "1.9.1" % "test", libraryDependencies += "org.scala-lang" % "jline" % "2.10.1", scalacOptions ++= Seq("-feature", "-language:postfixOps", "-language:higherKinds", "-language:implicitConversions"), initialCommands in console := "import scalaz._, Scalaz._;", initialCommands in console += "import pl.luckboy.purfuncor._;", initialCommands in console += "import pl.luckboy.purfuncor.frontend.parser.Parser;", initialCommands in console += "import pl.luckboy.purfuncor.frontend.resolver.Resolver;", initialCommands in console += "import pl.luckboy.purfuncor.frontend.resolver.NameTree;", initialCommands in console += "import pl.luckboy.purfuncor.frontend.resolver.Scope;", initialCommands in console += "import pl.luckboy.purfuncor.backend.interp.Interpreter;", initialCommands in console += "import pl.luckboy.purfuncor.backend.interp.SymbolEnvironment;", initialCommands in console += "import pl.luckboy.purfuncor.frontend.kinder.Kinder;", initialCommands in console += "import pl.luckboy.purfuncor.frontend.kinder.SymbolKindInferenceEnvironment;", initialCommands in console += "import pl.luckboy.purfuncor.frontend.kinder.InferredKindTable;", initialCommands in console += "import pl.luckboy.purfuncor.frontend.lmbdindexer.LambdaIndexer;", initialCommands in console += "import pl.luckboy.purfuncor.frontend.parser.TypeLambdaInfo;", initialCommands in console += "import pl.luckboy.purfuncor.frontend.typer.Typer;", initialCommands in console += "import pl.luckboy.purfuncor.frontend.typer.SymbolTypeEnvironment;", initialCommands in console += "import pl.luckboy.purfuncor.frontend.typer.SymbolTypeInferenceEnvironment;", initialCommands in console += "import pl.luckboy.purfuncor.frontend.typer.InferredTypeTable;", initialCommands in console += "import pl.luckboy.purfuncor.frontend.instant.Instantiator;", initialCommands in console += "import pl.luckboy.purfuncor.frontend.instant.SymbolInstantiationEnvironment;", initialCommands in console += "import pl.luckboy.purfuncor.frontend.instant.InstanceTree;", initialCommands in console += "import pl.luckboy.purfuncor.frontend.instant.InstanceArgTable" ) ) }
luckboy/Purfuncor
project/PurfuncorBuild.scala
Scala
mpl-2.0
2,755
/** * Copyright (c) 2014-2016 Snowplow Analytics Ltd. * All rights reserved. * * This program is licensed to you under the Apache License Version 2.0, * and you may not use this file except in compliance with the Apache * License Version 2.0. * You may obtain a copy of the Apache License Version 2.0 at * http://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, * software distributed under the Apache License Version 2.0 is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. * * See the Apache License Version 2.0 for the specific language * governing permissions and limitations there under. */ package com.snowplowanalytics.snowplow package storage import com.fasterxml.jackson.core.JsonParseException import java.nio.charset.StandardCharsets.UTF_8 import org.json4s.jackson.JsonMethods._ import org.json4s.JsonDSL._ import org.json4s._ import org.slf4j.LoggerFactory import scala.util.matching.Regex import _root_.scalaz._, Scalaz._ object Transformer { /** * Convert a Kafka record to a JSON string * * @param record Byte array representation of an enriched event string * @return ValidatedRecord for the event */ def transform(recordString: String): ValidatedRecord = // The -1 is necessary to prevent trailing empty strings from being discarded (recordString, jsonifyGoodEvent(recordString.split("\\t", -1)).leftMap(_.toList)) private val StringField: TsvToJsonConverter = (key, value) => JObject(key -> JString(value)).successNel private val IntField: TsvToJsonConverter = (key, value) => JObject(key -> JInt(value.toInt)).successNel private val BoolField: TsvToJsonConverter = handleBooleanField private val DoubleField: TsvToJsonConverter = (key, value) => JObject(key -> JDouble(value.toDouble)).successNel private val TstampField: TsvToJsonConverter = (key, value) => JObject(key -> JString(reformatTstamp(value))).successNel private val ContextsField: TsvToJsonConverter = (key, value) => Shredder.parseContexts(value) private val UnstructField: TsvToJsonConverter = (key, value) => Shredder.parseUnstruct(value) private val fields = List( "app_id" -> StringField, "platform" -> StringField, "etl_tstamp" -> TstampField, "collector_tstamp" -> TstampField, "dvce_created_tstamp" -> TstampField, "event" -> StringField, "event_id" -> StringField, "txn_id" -> IntField, "name_tracker" -> StringField, "v_tracker" -> StringField, "v_collector" -> StringField, "v_etl" -> StringField, "user_id" -> StringField, "user_ipaddress" -> StringField, "user_fingerprint" -> StringField, "domain_userid" -> StringField, "domain_sessionidx" -> IntField, "network_userid" -> StringField, "geo_country" -> StringField, "geo_region" -> StringField, "geo_city" -> StringField, "geo_zipcode" -> StringField, "geo_latitude" -> DoubleField, "geo_longitude" -> DoubleField, "geo_region_name" -> StringField, "ip_isp" -> StringField, "ip_organization" -> StringField, "ip_domain" -> StringField, "ip_netspeed" -> StringField, "page_url" -> StringField, "page_title" -> StringField, "page_referrer" -> StringField, "page_urlscheme" -> StringField, "page_urlhost" -> StringField, "page_urlport" -> IntField, "page_urlpath" -> StringField, "page_urlquery" -> StringField, "page_urlfragment" -> StringField, "refr_urlscheme" -> StringField, "refr_urlhost" -> StringField, "refr_urlport" -> IntField, "refr_urlpath" -> StringField, "refr_urlquery" -> StringField, "refr_urlfragment" -> StringField, "refr_medium" -> StringField, "refr_source" -> StringField, 
"refr_term" -> StringField, "mkt_medium" -> StringField, "mkt_source" -> StringField, "mkt_term" -> StringField, "mkt_content" -> StringField, "mkt_campaign" -> StringField, "contexts" -> ContextsField, "se_category" -> StringField, "se_action" -> StringField, "se_label" -> StringField, "se_property" -> StringField, "se_value" -> StringField, "unstruct_event" -> UnstructField, "tr_orderid" -> StringField, "tr_affiliation" -> StringField, "tr_total" -> DoubleField, "tr_tax" -> DoubleField, "tr_shipping" -> DoubleField, "tr_city" -> StringField, "tr_state" -> StringField, "tr_country" -> StringField, "ti_orderid" -> StringField, "ti_sku" -> StringField, "ti_name" -> StringField, "ti_category" -> StringField, "ti_price" -> DoubleField, "ti_quantity" -> IntField, "pp_xoffset_min" -> IntField, "pp_xoffset_max" -> IntField, "pp_yoffset_min" -> IntField, "pp_yoffset_max" -> IntField, "useragent" -> StringField, "br_name" -> StringField, "br_family" -> StringField, "br_version" -> StringField, "br_type" -> StringField, "br_renderengine" -> StringField, "br_lang" -> StringField, "br_features_pdf" -> BoolField, "br_features_flash" -> BoolField, "br_features_java" -> BoolField, "br_features_director" -> BoolField, "br_features_quicktime" -> BoolField, "br_features_realplayer" -> BoolField, "br_features_windowsmedia" -> BoolField, "br_features_gears" -> BoolField, "br_features_silverlight" -> BoolField, "br_cookies" -> BoolField, "br_colordepth" -> StringField, "br_viewwidth" -> IntField, "br_viewheight" -> IntField, "os_name" -> StringField, "os_family" -> StringField, "os_manufacturer" -> StringField, "os_timezone" -> StringField, "dvce_type" -> StringField, "dvce_ismobile" -> BoolField, "dvce_screenwidth" -> IntField, "dvce_screenheight" -> IntField, "doc_charset" -> StringField, "doc_width" -> IntField, "doc_height" -> IntField, "tr_currency" -> StringField, "tr_total_base" -> DoubleField, "tr_tax_base" -> DoubleField, "tr_shipping_base" -> DoubleField, "ti_currency" -> StringField, "ti_price_base" -> DoubleField, "base_currency" -> StringField, "geo_timezone" -> StringField, "mkt_clickid" -> StringField, "mkt_network" -> StringField, "etl_tags" -> StringField, "dvce_sent_tstamp" -> TstampField, "refr_domain_userid" -> StringField, "refr_device_tstamp" -> TstampField, "derived_contexts" -> ContextsField, "domain_sessionid" -> StringField, "derived_tstamp" -> TstampField, "event_vendor" -> StringField, "event_name" -> StringField, "event_format" -> StringField, "event_version" -> StringField, "event_fingerprint" -> StringField, "true_tstamp" -> TstampField ) private object GeopointIndexes { val latitude = 22 val longitude = 23 } /** * Converts a timestamp to ISO 8601 format * * @param tstamp Timestamp of the form YYYY-MM-DD hh:mm:ss * @return ISO 8601 timestamp */ private def reformatTstamp(tstamp: String): String = tstamp.replaceAll(" ", "T") + "Z" /** * Converts "0" to false and "1" to true * * @param key The field name * @param value The field value - should be "0" or "1" * @return Validated JObject */ private def handleBooleanField(key: String, value: String): ValidationNel[String, JObject] = value match { case "1" => JObject(key -> JBool(true)).successNel case "0" => JObject(key -> JBool(false)).successNel case _ => "Value [%s] is not valid for field [%s]: expected 0 or 1".format(value, key).failureNel } private lazy val log = LoggerFactory.getLogger(getClass()) /** * Convert the value of a field to a JValue based on the name of the field * * @param fieldInformation ((field name, 
field-to-JObject conversion function), field value) * @return JObject representing a single field in the JSON */ private def converter( fieldInformation: ((String, TsvToJsonConverter), String)): ValidationNel[String, JObject] = { val ((fieldName, fieldConversionFunction), fieldValue) = fieldInformation if (fieldValue.isEmpty) { JObject(fieldName -> JNull).successNel } else { try { fieldConversionFunction(fieldName, fieldValue) } catch { case e @ (_: IllegalArgumentException | _: JsonParseException) => "Value [%s] is not valid for field [%s]: %s" .format(fieldValue, fieldName, e.getMessage) .failureNel } } } /** * Converts an aray of field values to a JSON whose keys are the field names * * @param event Array of values for the event * @return ValidatedRecord containing JSON for the event and the event_id (if it exists) */ private def jsonifyGoodEvent(event: Array[String]): ValidationNel[String, JsonRecord] = { if (event.size != fields.size) { log.warn( s"Expected ${fields.size} fields, received ${event.size} fields. This may be caused by using an outdated version of Snowplow Kafka Enrich.") } if (event.size <= Transformer.GeopointIndexes.latitude.max( Transformer.GeopointIndexes.longitude)) { s"Event contained only ${event.size} tab-separated fields".failureNel } else { val geoLocation: JObject = { val latitude = event(Transformer.GeopointIndexes.latitude) val longitude = event(Transformer.GeopointIndexes.longitude) if (latitude.size > 0 && longitude.size > 0) { JObject("geo_location" -> JString(s"$latitude,$longitude")) } else JObject() } val validatedJObjects: List[ValidationNel[String, JObject]] = fields.zip(event.toList).map(converter) val switched: ValidationNel[String, List[JObject]] = validatedJObjects.sequenceU switched.map { x => val j = x.fold(geoLocation)((x, y) => y ~ x) JsonRecord(j, extractEventId(j)) } } } /** * Extract the event_id field from an event JSON for use as a document ID * * @param json * @return Option boxing event_id */ private def extractEventId(json: JValue): Option[String] = json \\ "event_id" match { case JString(eid) => eid.some case _ => None } }
TimothyKlim/snowplow
4-storage/kafka-elasticsearch-sink/src/main/scala/com/snowplowanalytics/snowplow/storage/Transformer.scala
Scala
apache-2.0
11,902
package parsing.ir.normalized

import parsing.ir.homo.Token

/**
  * Created by hongdi.ren.
  */
object ExprNodeTest extends App {
  val plus = Token(Token.PLUS, "+")
  val one = Token(Token.INT, "1")
  val two = Token(Token.INT, "2")

  val root = AddNode(IntNode(one), plus, IntNode(two))
  println(root.treeString())
}
Ryan-Git/LangImplPatterns
src/test/scala/parsing/ir/normalized/ExprNodeTest.scala
Scala
apache-2.0
323
/* * Artificial Intelligence for Humans * Volume 2: Nature Inspired Algorithms * Java Version * http://www.aifh.org * http://www.jeffheaton.com * * Code repository: * https://github.com/jeffheaton/aifh * * Copyright 2014 by Jeff Heaton * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * For more information on Heaton Research copyrights, licenses * and trademarks visit: * http://www.heatonresearch.com/copyright */ package com.heatonresearch.aifh.evolutionary.sort import com.heatonresearch.aifh.evolutionary.genome.Genome import java.util.Comparator /** * Defines methods for comparing genomes. Also provides methods to apply bonuses * and penalties. */ trait GenomeComparator extends Comparator[Genome] { /** * Apply a bonus, this is a simple percent that is applied in the direction * specified by the "should minimize" property of the score function. * * @param value The current value. * @param bonus The bonus. * @return The resulting value. */ def applyBonus(value: Double, bonus: Double): Double /** * Apply a penalty, this is a simple percent that is applied in the * direction specified by the "should minimize" property of the score * function. * * @param value The current value. * @param bonus The penalty. * @return The resulting value. */ def applyPenalty(value: Double, bonus: Double): Double /** * Determine if one score is better than the other. * * @param d1 The first score to compare. * @param d2 The second score to compare. * @return True if d1 is better than d2. */ def isBetterThan(d1: Double, d2: Double): Boolean /** * Determine if one genome is better than the other genome. * * @param genome1 The first genome. * @param genome2 The second genome. * @return True, if genome1 is better than genome2. */ def isBetterThan(genome1: Genome, genome2: Genome): Boolean /** * @return Returns true if the score should be minimized. */ def shouldMinimize: Boolean }
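// --- Illustrative implementation (editor's sketch, not part of the original source) ---
// A minimal comparator for score-minimizing searches. The score accessor is passed in as a
// function because the concrete Genome API is not shown here; the bonus/penalty arithmetic is
// one plausible reading of the trait's documentation, not the library's canonical behaviour.
class MinimizeScoreComparator(bestScoreOf: Genome => Double) extends GenomeComparator {
  override def shouldMinimize: Boolean = true

  // A bonus moves the score in the "better" direction (down, when minimizing).
  override def applyBonus(value: Double, bonus: Double): Double = value - (value * bonus)

  // A penalty moves the score in the "worse" direction (up, when minimizing).
  override def applyPenalty(value: Double, bonus: Double): Double = value + (value * bonus)

  override def isBetterThan(d1: Double, d2: Double): Boolean = d1 < d2

  override def isBetterThan(genome1: Genome, genome2: Genome): Boolean =
    isBetterThan(bestScoreOf(genome1), bestScoreOf(genome2))

  // Required by java.util.Comparator[Genome].
  override def compare(g1: Genome, g2: Genome): Int =
    java.lang.Double.compare(bestScoreOf(g1), bestScoreOf(g2))
}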
PeterLauris/aifh
vol2/vol2-scala-examples/src/main/scala/com/heatonresearch/aifh/evolutionary/sort/GenomeComparator.scala
Scala
apache-2.0
2,527
package models

import scala.collection.immutable

object PaypalCountry {
  val map = immutable.Map[CountryCode, String](
    CountryCode.JPN -> "JP"
  )

  def apply(cc: CountryCode): Option[String] = map.get(cc)
}
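// --- Illustrative usage (editor's sketch, not part of the original source) ---
// Only JPN is mapped above; any other CountryCode value simply yields None.
object PaypalCountryExample extends App {
  println(PaypalCountry(CountryCode.JPN)) // Some("JP")
}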
ruimo/store2
app/models/PaypalCountry.scala
Scala
apache-2.0
217
package com.github.reikje

import com.github.reikje.specifiers._

import scala.collection.mutable

/**
 * [[com.github.reikje.PrintfStringOps]] adds a new method '''formatx''' to the String class via implicit conversions.
 *
 * @author rschatz
 */
object PrintfStringOps {

  private val pattern = "\\%[\\w]+".r

  private lazy val conversions: Iterable[SupportedStringConversion] = {
    UnsignedDecimalInteger :: UnsignedLongLongInteger :: UnsignedHexadecimalInteger :: PointerAddress :: Nil
  }

  private val stringSpecifier = "%s"

  implicit class AdditionalSpecifiers(val s: String) {

    def formatx(args: Any*): String = {
      if (args != null) {
        val occurences = mutable.Map.empty[String, Int]
        var arguments = args.toList

        pattern.findAllIn(s).zipWithIndex.foreach {
          case (specifier, i) =>
            if (i < arguments.size) {
              conversions.find(conversion => conversion.specifier == specifier) match {
                case Some(conversion) =>
                  val count = occurences.getOrElse(specifier, 0)
                  occurences.put(specifier, count + 1)
                  arguments = arguments.updated(i, conversion.convert(arguments(i)))
                case _ => // do nothing
              }
            }
        }

        // for each of the supported specifiers replace as many as we had arguments for with %s, so we get
        // the correct MissingFormatArgumentException if the user didn't specify enough args
        val patched = occurences.keys.foldLeft(s) {
          case (current, key) =>
            Range(0, occurences.getOrElse(key, 0)).foldLeft(current) {
              case (now, _) => now.replaceFirst(key, stringSpecifier)
            }
        }

        patched.format(arguments: _*)
      } else {
        s.format(args)
      }
    }
  }
}
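// --- Illustrative usage (editor's sketch, not part of the original source) ---
// The concrete specifier strings live in com.github.reikje.specifiers and are not shown here;
// "%u" standing for UnsignedDecimalInteger is an assumption. Specifiers that are not in the
// `conversions` list (such as "%d") fall through to the normal java.util.Formatter behaviour,
// because only matched specifiers are converted and rewritten to "%s".
object PrintfStringOpsExample extends App {
  import PrintfStringOps._

  println("signed: %d, unsigned: %u".formatx(-1, -1))
}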
reikje/printf-ext
src/main/scala/com/github/reikje/PrintfStringOps.scala
Scala
apache-2.0
1,853
package org.ensime.api import java.io.File sealed abstract class DeclaredAs(val symbol: scala.Symbol) object DeclaredAs { case object Method extends DeclaredAs('method) case object Trait extends DeclaredAs('trait) case object Interface extends DeclaredAs('interface) case object Object extends DeclaredAs('object) case object Class extends DeclaredAs('class) case object Field extends DeclaredAs('field) case object Nil extends DeclaredAs('nil) def allDeclarations = Seq(Method, Trait, Interface, Object, Class, Field, Nil) } sealed trait FileEdit extends Ordered[FileEdit] { def file: File def text: String def from: Int def to: Int // Required as of Scala 2.11 for reasons unknown - the companion to Ordered // should already be in implicit scope import scala.math.Ordered.orderingToOrdered def compare(that: FileEdit): Int = (this.file, this.from, this.to, this.text).compare((that.file, that.from, that.to, that.text)) } case class TextEdit(file: File, from: Int, to: Int, text: String) extends FileEdit // the next case classes have weird fields because we need the values in the protocol case class NewFile(file: File, from: Int, to: Int, text: String) extends FileEdit object NewFile { def apply(file: File, text: String): NewFile = new NewFile(file, 0, text.length - 1, text) } case class DeleteFile(file: File, from: Int, to: Int, text: String) extends FileEdit object DeleteFile { def apply(file: File, text: String): DeleteFile = new DeleteFile(file, 0, text.length - 1, text) } sealed trait NoteSeverity case object NoteError extends NoteSeverity case object NoteWarn extends NoteSeverity case object NoteInfo extends NoteSeverity object NoteSeverity { def apply(severity: Int) = severity match { case 2 => NoteError case 1 => NoteWarn case 0 => NoteInfo } } sealed abstract class RefactorLocation(val symbol: Symbol) object RefactorLocation { case object QualifiedName extends RefactorLocation('qualifiedName) case object File extends RefactorLocation('file) case object NewName extends RefactorLocation('newName) case object Name extends RefactorLocation('name) case object Start extends RefactorLocation('start) case object End extends RefactorLocation('end) case object MethodName extends RefactorLocation('methodName) } sealed abstract class RefactorType(val symbol: Symbol) object RefactorType { case object Rename extends RefactorType('rename) case object ExtractMethod extends RefactorType('extractMethod) case object ExtractLocal extends RefactorType('extractLocal) case object InlineLocal extends RefactorType('inlineLocal) case object OrganizeImports extends RefactorType('organizeImports) case object AddImport extends RefactorType('addImport) def allTypes = Seq(Rename, ExtractMethod, ExtractLocal, InlineLocal, OrganizeImports, AddImport) }
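// --- Illustrative usage (editor's sketch, not part of the original source) ---
// Severity codes map onto the sealed NoteSeverity cases, and FileEdits order themselves by
// (file, from, to, text), so a batch of edits can be applied in a stable position order.
object CommonApiExample extends App {
  assert(NoteSeverity(2) == NoteError)

  val f = new java.io.File("Example.scala")
  val edits: List[FileEdit] = List(TextEdit(f, 10, 12, "bar"), TextEdit(f, 0, 3, "foo"))
  println(edits.sorted.map(_.from)) // List(0, 10)
}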
eddsteel/ensime
api/src/main/scala/org/ensime/api/common.scala
Scala
gpl-3.0
2,867
package java.lang

import scala.scalanative.native.Ptr
import scala.scalanative.runtime.Type

final class _Class[A](val ty: Ptr[Type]) {
  def getName(): String = (!ty).name

  override def hashCode: Int =
    ty.cast[Long].##

  override def equals(other: Any): scala.Boolean =
    other match {
      case other: _Class[_] =>
        ty.cast[Long] == other.ty.cast[Long]
      case _ =>
        false
    }

  // TODO:
  def getInterfaces(): Array[_Class[_]] = ???
  def getSuperclass(): _Class[_] = ???
  def getComponentType(): _Class[_] = ???
  def isArray(): scala.Boolean = ???
}

object _Class {
  private[java] implicit def _class2class[A](cls: _Class[A]): Class[A] =
    cls.asInstanceOf[Class[A]]
  private[java] implicit def class2_class[A](cls: Class[A]): _Class[A] =
    cls.asInstanceOf[_Class[A]]
}
phdoerfler/scala-native
nativelib/src/main/scala/java/lang/Class.scala
Scala
bsd-3-clause
817
package com.bostontechnologies.quickfixs.authentication

import quickfix.SessionID

trait FixCredentialsValidator {
  def isValid(credentials: Credentials, sessionId: SessionID): Boolean
}
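// --- Illustrative implementation (editor's sketch, not part of the original source) ---
// A trivial validator that accepts exactly one expected credential pair and ignores the session;
// real deployments would presumably look credentials up per SessionID.
class SingleUserValidator(expected: Credentials) extends FixCredentialsValidator {
  override def isValid(credentials: Credentials, sessionId: SessionID): Boolean =
    credentials == expected
}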
Forexware/quickfixs
src/main/scala/com/bostontechnologies/quickfixs/authentication/FixCredentialsValidator.scala
Scala
apache-2.0
190
package dal import javax.inject.{ Inject, Singleton } import play.api.db.slick.DatabaseConfigProvider import slick.driver.JdbcProfile import java.time.LocalDateTime import models.Tour import scala.concurrent.{ Future, ExecutionContext } import scala.util.{Success, Failure} /** * A repository for tours. * * @param dbConfigProvider The Play db config provider. Play will inject this * for you. */ @Singleton class TourRepository @Inject()( dbConfigProvider: DatabaseConfigProvider, categories: CategoryRepository, tourCategories: TourCategoryRepository)( implicit ec: ExecutionContext) { // We want the JdbcProfile for this provider private val dbConfig = dbConfigProvider.get[JdbcProfile] // These imports are important, the first one brings db into scope, which will // let you do the actual db operations. // The second one brings the Slick DSL into scope, which lets you define the // table and other queries. import dbConfig._ import driver.api._ /** * Here we define the table. It will have a name of "tours" within the * database */ private class ToursTable(tag: Tag) extends Table[Tour](tag, "tours") { /** The ID column, which is the primary key, and auto incremented */ def id = column[Long]("id", O.PrimaryKey, O.AutoInc) /** The name column, can't be null */ def name = column[String]("name") /** The description column, can't be null */ def description = column[String]("description") /** The last time the tour was updated */ def lastUpdated = column[String]("last_updated") /** * This is the tables default "projection". * * It defines how the columns are converted to and from the Tour object. */ def * = (id, name, description, lastUpdated) <> ((Tour.apply _).tupled, Tour.unapply) } /** * The starting point for all queries on the tours table. */ private val tours = TableQuery[ToursTable] /** * Create a tour with the given values. * * This is an asynchronous operation, it will return a future of the created * tour, which can be used to obtain the * id for that tour. */ def create( name: String, description: String): Future[Tour] = db.run { // We create a projection of just the main columns, since // we're not inserting a value for the id column (tours.map(t => (t.name, t.description, t.lastUpdated)) // Now define it to return the id, because we want to know what id was // generated for the tour returning tours.map(_.id) // And we define a transformation for the returned value, which combines // our original parameters with the returned id into ((tour, id) => Tour(id, tour._1, tour._2, tour._3)) // And finally, insert the tour into the database ) += (name, description, LocalDateTime.now().toString.replace('T', ' ')) } /** * List all the tours in the database. */ def list(): Future[Seq[Tour]] = db.run { tours.result } /** * Find tour with corresponding id */ def findById(id: Long): Future[Option[Tour]] = db.run { tours.filter(_.id === id).result.headOption } /** * Update the given tour's timestamp */ def updateTimestamp(id: Long) = { // We want to also update any categories this tour is a part of tourCategories.findByTourId(id) onComplete { case Success(catIds) => catIds.map { catId => categories.updateTimestamp(catId) } case Failure(err) => println("An error occurred: " + err.getMessage) } db.run { val tStamp = for { t <- tours if t.id === id } yield t.lastUpdated // Update the timestamp, make sure it's in a consistent format tStamp.update(LocalDateTime.now().toString.replace('T', ' ')) } } }
kelleyb/RPI-Tours-Backend
app/dal/TourRepository.scala
Scala
mit
3,830
package com.thangiee.lolhangouts.data.usecases.entities

case class TopChampion(
  name: String,
  numOfGames: Int,
  winsRate: Double,
  avgKills: Double,
  avgKillsPerformance: Double,
  avgDeaths: Double,
  avgDeathsPerformance: Double,
  avgAssists: Double,
  avgAssistsPerformance: Double,
  avgCs: Int,
  avgCsPerformance: Int,
  avgGold: Int,
  avgGoldPerformance: Int,
  overAllPerformance: Double = 0.0
)
Thangiee/LoL-Hangouts
src/com/thangiee/lolhangouts/data/usecases/entities/TopChampion.scala
Scala
apache-2.0
401
package org.clulab.struct import scala.collection.mutable import scala.collection.mutable.ListBuffer import scala.util.matching.Regex /** * An inverted index of the DirectedGraph, so we can efficiently implement enhanced dependencies * User: mihais * Date: 8/2/17 */ class DirectedGraphIndex[E]( val size: Int, val outgoingEdges:Array[mutable.HashSet[(Int, E)]], // from head to modifier val incomingEdges:Array[mutable.HashSet[(Int, E)]], // from modifier to head val edgesByName:mutable.HashMap[E, mutable.HashSet[(Int, Int)]]) { // indexes edges by label def this(sentenceLength:Int) { this(sentenceLength, DirectedGraphIndex.mkOutgoing[E](sentenceLength), DirectedGraphIndex.mkIncoming[E](sentenceLength), new mutable.HashMap[E, mutable.HashSet[(Int, Int)]]() ) } def addEdge(head:Int, modifier:Int, label:E) { outgoingEdges(head) += Tuple2(modifier, label) incomingEdges(modifier) += Tuple2(head, label) val byLabel = edgesByName.getOrElseUpdate(label, new mutable.HashSet[(Int, Int)]()) byLabel += Tuple2(head, modifier) } def removeEdge(head:Int, modifier:Int, label:E): Unit = { outgoingEdges(head).remove(Tuple2(modifier, label)) incomingEdges(modifier).remove(Tuple2(head, label)) val byLabel = edgesByName.get(label) if(byLabel.nonEmpty) { byLabel.get.remove(Tuple2(head, modifier)) } } def findByName(label:E): Seq[Edge[E]] = { val edges = new ListBuffer[Edge[E]] edgesByName.get(label).foreach(ses => for(se <- ses) { edges += Edge(se._1, se._2, label) } ) edges } def findByHeadAndName(head:Int, label:E): Seq[Edge[E]] = { findByName(label).filter(_.source == head) } def findByModifierAndName(modifier:Int, label:E): Seq[Edge[E]] = { findByName(label).filter(_.destination == modifier) } def findByHeadAndPattern(head:Int, pattern:Regex): Seq[Edge[E]] = { val edges = new ListBuffer[Edge[E]] if(head < outgoingEdges.length) { for (e <- outgoingEdges(head).toList) { if (pattern.findFirstMatchIn(e._2.toString).nonEmpty) { edges += new Edge[E](head, e._1, e._2) } } } edges } def findByModifierAndPattern(modifier:Int, pattern:Regex): Seq[Edge[E]] = { val edges = new ListBuffer[Edge[E]] if(modifier < incomingEdges.length) { for (e <- incomingEdges(modifier).toList) { if (pattern.findFirstMatchIn(e._2.toString).nonEmpty) { edges += new Edge[E](e._1, modifier, e._2) } } } edges } def mkEdges(): List[Edge[E]] = { val edges = new ListBuffer[Edge[E]] for (head <- outgoingEdges.indices) { for (ml <- outgoingEdges(head)) { val e = new Edge[E](head, ml._1, ml._2) edges += e } } edges.toList } def toDirectedGraph(preferredSize: Option[Int] = None): DirectedGraph[E] = { val edges = mkEdges() new DirectedGraph[E](edges, preferredSize) } } object DirectedGraphIndex { private def mkOutgoing[E](len:Int): Array[mutable.HashSet[(Int, E)]] = { val outgoing = new Array[mutable.HashSet[(Int, E)]](len) for(i <- outgoing.indices) outgoing(i) = new mutable.HashSet[(Int, E)]() outgoing } private def mkIncoming[E](len:Int): Array[mutable.HashSet[(Int, E)]] = { val incoming = new Array[mutable.HashSet[(Int, E)]](len) for(i <- incoming.indices) incoming(i) = new mutable.HashSet[(Int, E)]() incoming } }
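// --- Illustrative usage (editor's sketch, not part of the original source) ---
// Builds a tiny index for a 3-token sentence and queries it by label; the token indices and
// dependency labels below are made up for illustration.
object DirectedGraphIndexExample extends App {
  val index = new DirectedGraphIndex[String](3)
  index.addEdge(1, 0, "nsubj") // token 1 governs token 0 with label "nsubj"
  index.addEdge(1, 2, "dobj")

  println(index.findByName("nsubj"))          // one Edge(1, 0, "nsubj")
  println(index.findByHeadAndName(1, "dobj")) // one Edge(1, 2, "dobj")

  val graph = index.toDirectedGraph()         // materialize as a regular DirectedGraph[String]
}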
sistanlp/processors
main/src/main/scala/org/clulab/struct/DirectedGraphIndex.scala
Scala
apache-2.0
3,500
package yiris.core.removals

import org.scalatest.{FlatSpec, Matchers}

/**
 * Created by kasonchan on 12/2/15.
 */
class RemovalsSuite extends FlatSpec with Matchers with Removals {

  "Default removals" should "pass" in {
    val defaultRemovals = Removals()

    defaultRemovals.tss shouldBe None
    defaultRemovals.bod5 shouldBe None
    defaultRemovals.nh3n shouldBe None
    defaultRemovals.tp shouldBe None
    defaultRemovals.fecalColiform shouldBe None
    defaultRemovals.enterococci shouldBe None
  }

}
kasonchan/yiris
core/src/test/scala/yiris/core/removals/RemovalsSuite.scala
Scala
apache-2.0
515
package example import diode._ import org.scalajs.dom._ // marker trait to identify actions that should be RAF batched trait RAFAction extends Action private[example] final case class RAFWrapper(action: Any, dispatch: Dispatcher) extends Action final case class RAFTimeStamp(time: Double) extends Action class RAFBatcher[M <: AnyRef] extends ActionProcessor[M] { private var batch = List.empty[RAFWrapper] private var frameRequested = false /** * Callback for RAF. * * @param time * Precise time of the frame */ private def nextAnimationFrame(time: Double): Unit = { frameRequested = false if (batch.nonEmpty) { val curBatch = batch batch = Nil // dispatch all actions in the batch, supports multiple different dispatchers curBatch.reverse.groupBy(_.dispatch).foreach { case (dispatch, actions) => // Precede actions with a time stamp action to get correct time in animations. // When dispatching a sequence, Circuit optimizes processing internally and only calls // listeners after all the actions are processed dispatch(RAFTimeStamp(time) +: ActionBatch(actions: _*)) } // request next frame requestAnimationFrame } else { // got no actions to dispatch, no need to request next frame } } /** * Requests an animation frame from the browser, unless one has already been requested */ private def requestAnimationFrame: Unit = { if (!frameRequested) { frameRequested = true window.requestAnimationFrame(nextAnimationFrame _) } } override def process(dispatch: Dispatcher, action: Any, next: Any => ActionResult[M], currentModel: M) = { action match { case rafAction: RAFAction => // save action into the batch using a wrapper batch = RAFWrapper(rafAction, dispatch) :: batch // request animation frame to run the batch requestAnimationFrame // skip processing of the action for now ActionResult.NoChange case RAFWrapper(rafAction, _) => // unwrap the RAF action and continue processing normally next(rafAction) case _ => // default is to just call the next processor next(action) } } }
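// --- Illustrative wiring (editor's sketch, not part of the original source) ---
// A Diode circuit can register extra ActionProcessors; adding the batcher means every action
// that mixes in RAFAction is queued and dispatched on the next animation frame, preceded by a
// RAFTimeStamp. The model type and empty handler composition below are placeholders.
case class AppModel(counter: Int = 0)

object AppCircuit extends Circuit[AppModel] {
  override protected def initialModel: AppModel = AppModel()
  override protected val actionHandler: HandlerFunction = composeHandlers(/* handlers elided */)

  addProcessor(new RAFBatcher[AppModel])
}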
ochrons/diode
examples/raf/src/main/scala/example/RAFBatcher.scala
Scala
mit
2,289
package at.forsyte.apalache.tla.bmcmt.config

import at.forsyte.apalache.tla.bmcmt.types.eager.TrivialTypeFinder
import at.forsyte.apalache.tla.lir.storage.ChangeListener
import at.forsyte.apalache.tla.lir.transformations.TransformationTracker
import at.forsyte.apalache.tla.lir.transformations.impl.TrackerWithListeners
import com.google.inject.{Inject, Provider, Singleton}

/**
 * Jure, 4.10.19: This implementation is completely pointless, due to the package architecture.
 * Because the provider uses TrivialTypeFinder, which is defined in `bmcmt`, it cannot be
 * used in all of the passes (e.g. PreproPass), since `pp` cannot have a dependency on `bmcmt`.
 * But because all passes (including PreproPass) require trackers, PreproPassImpl must
 * construct its own tracker anyway.
 */

/**
 * A factory that creates a singleton transformation tracker. The reason for having this factory is that we have
 * to pass a list of transformation listeners to the tracker, while the listeners are injected by Guice.
 *
 * @param changeListener a listener that records which expression was transformed into which expression
 *
 * @author Igor Konnov
 */
@Singleton
class TransformationTrackerProvider @Inject()(changeListener: ChangeListener, trivialTypeFinder: TrivialTypeFinder)
    extends Provider[TransformationTracker] {

  private val tracker = TrackerWithListeners(changeListener, trivialTypeFinder)

  override def get(): TransformationTracker = {
    tracker
  }
}
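// --- Illustrative wiring (editor's sketch, not part of the original source) ---
// A hypothetical Guice module showing how a provider like the one above is typically bound, so
// that every injection point of TransformationTracker receives the same tracker instance. The
// module name is an assumption, not Apalache code.
class HypotheticalTrackerModule extends com.google.inject.AbstractModule {
  override protected def configure(): Unit = {
    bind(classOf[TransformationTracker])
      .toProvider(classOf[TransformationTrackerProvider])
  }
}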
konnov/apalache
tla-bmcmt/src/main/scala/at/forsyte/apalache/tla/bmcmt/config/TransformationTrackerProvider.scala
Scala
apache-2.0
1,485
package org.jetbrains.plugins.scala package annotator import org.intellij.lang.annotations.Language import org.jetbrains.plugins.scala.base.SimpleTestCase import org.jetbrains.plugins.scala.extensions._ import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile import org.jetbrains.plugins.scala.lang.psi.api.base.ScConstructor import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScClass import org.jetbrains.plugins.scala.lang.psi.types.Compatibility class ConstructorAnnotatorTest extends SimpleTestCase { final val Header = """ class Seq[+A] object Seq { def apply[A](a: A) = new Seq[A] } class A(a: Int) class B[X](a: X) class C(a: Int) { def this() = this(0) } class D(a: Int) { def this(a: Boolean) = this(0) } class Z[+A]; object Z extends Z[Nothing]; class Y[+A] class E[X](a: Z[X]) { def this(o: Y[X]) = this(Z) } class F(implicit a: Int) class Klass[K](a: K) type Alias[A] = Klass[A] new Alias("") val iAmAScriptFile = () """ def testEmpty() { assertMatches(messages("")) { case Nil => } } def testFine() { val codes = Seq( "new A(0)", "new A(a = 0)", "new B[Int](0)}", "new B(0)", "new C(0)", "new C()", "new C", "new D(0)", "new D(false)", "new E[Int](new Y[Int])", "new E[Int](new Z[Int])", "new E(new Y[Int])", "new E(new Z[Int])", "new Alias[Int](0)" ) for {code <- codes} { assertMatches(messages(code)) { case Nil => } } } def testExcessArguments() { assertMatches(messages("new A(0, 1)")) { case Error("1", "Too many arguments for constructor") :: Nil => } } def testMissedParameters() { assertMatches(messages("new A")) { case Error(_, "Unspecified value parameters: a: Int") :: Nil => } assertMatches(messages("new A()")) { case Error(_, "Unspecified value parameters: a: Int") :: Nil => } assertMatches(messages("new B[Int]()")) { case Error(_, "Unspecified value parameters: a: X") :: Nil => } } def testNamedDuplicates() { assertMatches(messages("new A(a = null, a = Unit)")) { case Error("a", "Parameter specified multiple times") :: Error("a", "Parameter specified multiple times") :: Nil => } } def testTypeMismatch() { assertMatches(messages("new A(false)")) { case Error("false", "Type mismatch, expected: Int, actual: Boolean") :: Nil => } assertMatches(messages("new B[Int](false)")) { case Error("false", "Type mismatch, expected: Int, actual: Boolean") :: Nil => } } def testMalformedSignature() { assertMatches(messages("class Malformed(a: A*, b: B); new Malformed(0)")) { case Error("Malformed", "Constructor has malformed definition") :: Nil => } } // TODO: Type Aliases //class A(a: Int) //class B[X](a: X) // //type AA[A] = A[A] //type BB[A] = B[A] //new AA(0) //new BB(0) //new AA[Int](0) def messages(@Language(value = "Scala", prefix = Header) code: String): List[Message] = { val annotator = new ConstructorAnnotator {} val file: ScalaFile = (Header + code).parse val mock = new AnnotatorHolderMock(file) val seq = file.depthFirst().findByType[ScClass] Compatibility.seqClass = seq try { file.depthFirst().filterByType[ScConstructor].foreach { annotator.annotateConstructor(_, mock) } mock.annotations } finally { Compatibility.seqClass = None } } }
loskutov/intellij-scala
test/org/jetbrains/plugins/scala/annotator/ConstructorAnnotatorTest.scala
Scala
apache-2.0
3,558
package com.lyrx.text import com.lyrx.text.TextTypes.Pars /** * Created by alex on 25.04.17. */ abstract class DocumentStructure[T]()(implicit val ctx: Context, val coll: Collector[T], var generator: ParBasedGenerator[T]) { def collect(): ParBasedGenerator[T] implicit class RichString(s:String){ def asTitle()= newSection(s); def asSubTitle = newSubSection(s) def asInputSource() = withParagraphs(s) def asInclude() = { withParagraphs(s) all() } def asCodeExample() = codeExample(s) def asCodeFile() = codeFile(s) } implicit class RichInt(aNum:Int){ def asNormalParagraphs() = normal(aNum) def asItalicParagraphs() = italic(aNum) def itemized() = itemize(aNum) def enumerated() = enumerate(aNum) def dropped() = drop(aNum) } def codeFile(aFile: String) = { generator = generator.codeFile(aFile) } def codeExample(aString: String) = { generator = generator.codeExample(aString) } def annotatedPar(annotation: Option[String]) = { generator = generator.annotatedPar(annotation) } def bibliography() = { generator = generator.bibliography() } def toc() = { generator = generator.toc() } def itemize() = { val used = generator.parData.paragraphs.length generator = generator.itemize() } def itemize(count: Int) = { generator = generator.itemize(count) } def enumerate() = { val used = generator.parData.paragraphs.length generator = generator.enumerate() } def enumerate(count: Int) = { generator = generator.enumerate(count) } def withParagraphs(aName: String) = { generator = generator.withParagraphs(aName) } def normal(aTake: Int) = { val pars:Pars = generator.parData.paragraphs generator = generator.normal(aTake) } def drop(aDrop: Int) = { generator = generator.drop(aDrop) } def all() = { generator = generator.all() } def normalItalic(takes: Int*) = { generator = generator.normalItalic(takes: _*) } def italicNormal(takes: Int*) = { generator = generator.italicNormal(takes: _*) } def italic() = { generator = generator.italic() } def italic(aTake: Int) = { generator = generator.italic(aTake) } def newSection(title: String) = { generator = generator.newSection(title) } def newSubSection(s: String) = { generator = generator.newSubSection(s) } def newSection() = { generator = generator.newSection() } def newSubSection() = { generator = generator.newSection() } def separator() { generator = generator.separator() } def trim() { generator = generator.trim() } }
lyrx/lyrxgenerator
src/main/scala/com/lyrx/text/DocumentStructure.scala
Scala
gpl-3.0
2,749
package org.genivi.sota.messaging

import cats.data.Xor
import com.typesafe.config.{Config, ConfigException}

object ConfigHelpers {

  implicit class RichConfig(config: Config) {

    def configAt(path: String): ConfigException Xor Config =
      Xor.catchOnly[ConfigException](config.getConfig(path))

    def readString(path: String): ConfigException Xor String =
      Xor.catchOnly[ConfigException](config.getString(path))

    def readInt(path: String): ConfigException Xor Int =
      Xor.catchOnly[ConfigException](config.getInt(path))
  }
}
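// --- Illustrative usage (editor's sketch, not part of the original source) ---
// The "messaging" section and its "host"/"port" keys are made-up example config; any Typesafe
// Config works. Failures surface as a left-hand ConfigException instead of being thrown.
object ConfigHelpersExample {
  import ConfigHelpers._
  import com.typesafe.config.ConfigFactory

  val config = ConfigFactory.parseString("messaging { host = localhost, port = 9092 }")

  val hostAndPort: ConfigException Xor (String, Int) =
    for {
      section <- config.configAt("messaging")
      host    <- section.readString("host")
      port    <- section.readInt("port")
    } yield (host, port) // Xor.Right(("localhost", 9092))
}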
PDXostc/rvi_sota_server
common-messaging/src/main/scala/org/genivi/sota/messaging/ConfigHelpers.scala
Scala
mpl-2.0
549
package io.digitallibrary.bookapi.controller import io.digitallibrary.bookapi.{BookSwagger, TestData, TestEnvironment, UnitSuite} import io.digitallibrary.language.model.LanguageTag import org.scalatra.test.scalatest.ScalatraFunSuite import org.mockito.Mockito._ import org.mockito.ArgumentMatchers._ class SourceControllerTest extends UnitSuite with TestEnvironment with ScalatraFunSuite { implicit val swagger: BookSwagger = new BookSwagger lazy val controller = new SourceController addServlet(controller, "/*") override def beforeEach: Unit = { reset(readService) } test("that GET /eng returns a list of sources for a valid language") { when(readService.listSourcesForLanguage(any[Option[LanguageTag]])).thenReturn(Seq(TestData.Api.DefaultSource)) get("/eng", headers = Seq(("Authorization", s"Bearer ${TestData.validTestTokenWithAdminReadRole}"))) { status should equal(200) body should equal(s"""[{"source":"${TestData.Api.DefaultSource.source}","count":${TestData.Api.DefaultSource.count}}]""") } } test("that GET /eng returns a list of sources for a valid language with multiple sources") { when(readService.listSourcesForLanguage(any[Option[LanguageTag]])).thenReturn(Seq(TestData.Api.DefaultSource, TestData.Api.SecondarySource)) get("/eng" , headers = Seq(("Authorization", s"Bearer ${TestData.validTestTokenWithAdminReadRole}"))) { status should equal(200) body should equal(s"""[{"source":"${TestData.Api.DefaultSource.source}","count":${TestData.Api.DefaultSource.count}},{"source":"${TestData.Api.SecondarySource.source}","count":${TestData.Api.SecondarySource.count}}]""") } } test("that GET /not-valid returns 400 for an invalid language") { get("/not-valid", headers = Seq(("Authorization", s"Bearer ${TestData.validTestTokenWithAdminReadRole}"))) { status should equal(400) } } test("that GET /eng returns empty list when there is no sources") { when(readService.listSourcesForLanguage(any[Option[LanguageTag]])).thenReturn(Seq()) get("/eng", headers = Seq(("Authorization", s"Bearer ${TestData.validTestTokenWithAdminReadRole}"))) { status should equal(200) body should equal ("[]") } } test("that GET /eng returns 403 when not authorized") { when(readService.listSourcesForLanguage(any[Option[LanguageTag]])).thenReturn(Seq(TestData.Api.DefaultSource)) get("/eng", headers = Seq(("Authorization", s"Bearer ${TestData.invalidTestToken}"))) { status should equal (403) } } }
GlobalDigitalLibraryio/book-api
src/test/scala/io/digitallibrary/bookapi/controller/SourceControllerTest.scala
Scala
apache-2.0
2,553
package name.abhijitsarkar.scala.scalaimpatient.types

class DocumentProperty

object Title extends DocumentProperty

object Author extends DocumentProperty

object Then

/**
 * Q3: Complete the fluent interface in Section 18.1, "Singleton Types", so that one can call
 *
 * `book set Title to "Scala for the Impatient" set Author to "Cay Horstmann"`
 */
class Document {
  private var title: String = null
  private var author: String = null
  private var useNextArgAs: Any = null

  def set(obj: DocumentProperty): this.type = { useNextArgAs = obj; this }

  def and(t: Then.type): this.type = this

  def to(arg: String): this.type = {
    useNextArgAs match {
      case _: Title.type  => title = arg
      case _: Author.type => author = arg
    }
    this
  }

  def getTitle = title
  def getAuthor = author
}

class Book extends Document {}
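// --- Illustrative usage (editor's sketch, not part of the original source) ---
// Exercises the fluent call quoted in the Q3 comment above: each method returns this.type,
// so the infix chain keeps operating on the same Book instance.
object DocumentExample extends App {
  val book = new Book
  book set Title to "Scala for the Impatient" set Author to "Cay Horstmann"
  println(book.getTitle)  // Scala for the Impatient
  println(book.getAuthor) // Cay Horstmann
}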
abhijitsarkar/scala-impatient
src/main/scala/name/abhijitsarkar/scala/scalaimpatient/types/Document.scala
Scala
gpl-3.0
845
package com.equalinformation.interview.algo.scala.test.string_and_array

import com.equalinformation.interview.algo.scala.string_and_array.RotateArrayS1IntermediateArray_O_n
import org.scalatest.FlatSpec

/**
  * Created by bpupadhyaya on 6/13/16.
  */
class RotateArrayS1IntermediateArraySpec extends FlatSpec {

  "Rotate array" should "rotate elements by specified number of times" in {
    val soln = new RotateArrayS1IntermediateArray_O_n
    val givenArray = Array(10, 11, 12, 13, 14)
    val k = 3
    val result = soln.rotate(givenArray, k)
    assert(result(0) == 12)
  }

  it should "throw ArrayIndexOutOfBoundsException if rotation value is less than 0" in {
    val soln = new RotateArrayS1IntermediateArray_O_n
    val givenArray = Array(9, 8, 7, 6)
    val k = -1
    intercept[ArrayIndexOutOfBoundsException] {
      soln.rotate(givenArray, k)
    }
  }
}
bpupadhyaya/interview-collections
interview-collections-algo/src/test/scala/com/equalinformation/interview/algo/scala/test/string_and_array/RotateArrayS1IntermediateArraySpec.scala
Scala
mit
865
package org.jetbrains.plugins.hocon.psi import java.{lang => jl} import com.intellij.extapi.psi.ASTWrapperPsiElement import com.intellij.lang.ASTNode import com.intellij.openapi.roots.ProjectRootManager import com.intellij.openapi.util.text.StringUtil import com.intellij.psi._ import com.intellij.psi.impl.source.resolve.reference.impl.providers.FileReference import com.intellij.psi.tree.IElementType import org.jetbrains.plugins.hocon.CommonUtil._ import org.jetbrains.plugins.hocon.HoconConstants import org.jetbrains.plugins.hocon.HoconConstants._ import org.jetbrains.plugins.hocon.lexer.{HoconTokenSets, HoconTokenType} import org.jetbrains.plugins.hocon.parser.HoconElementType import org.jetbrains.plugins.hocon.ref.{HKeySelfReference, IncludedFileReferenceSet} import scala.reflect.{ClassTag, classTag} sealed abstract class HoconPsiElement(ast: ASTNode) extends ASTWrapperPsiElement(ast) { type Parent <: PsiElement override def getContainingFile: HoconPsiFile = super.getContainingFile.asInstanceOf[HoconPsiFile] def parent: Option[Parent] = getParent match { case p: PsiElement => Option(p.asInstanceOf[Parent]) case _ => None } def parents: Iterator[HoconPsiElement] = Iterator.iterate(this)(_.parent match { case Some(he: HoconPsiElement) => he case _ => null }).takeWhile(_ != null) def elementType: IElementType = getNode.getElementType def getChild[T >: Null : ClassTag]: T = findChildByClass(classTag[T].runtimeClass.asInstanceOf[Class[T]]) def findChild[T >: Null : ClassTag] = Option(getChild[T]) def findLastChild[T >: Null : ClassTag]: Option[PsiElement with T] = allChildrenReverse.collectFirst({ case t: T => t }) def allChildren: Iterator[PsiElement] = Iterator.iterate(getFirstChild)(_.getNextSibling).takeWhile(_ != null) def allChildrenReverse: Iterator[PsiElement] = Iterator.iterate(getLastChild)(_.getPrevSibling).takeWhile(_ != null) def prevSibling = Option(getPrevSibling) def prevSiblings: Iterator[PsiElement] = Iterator.iterate(getPrevSibling)(_.getPrevSibling).takeWhile(_ != null) def nextSibling = Option(getNextSibling) def nextSiblings: Iterator[PsiElement] = Iterator.iterate(getNextSibling)(_.getNextSibling).takeWhile(_ != null) def nonWhitespaceChildren: Iterator[PsiElement] = allChildren.filterNot(ch => ch.getNode.getElementType == TokenType.WHITE_SPACE) def nonWhitespaceOrCommentChildren: Iterator[PsiElement] = allChildren.filterNot(ch => (HoconTokenSets.Comment | TokenType.WHITE_SPACE).contains(ch.getNode.getElementType)) def findChildren[T <: HoconPsiElement : ClassTag]: Iterator[T] = allChildren.collect { case t: T => t } } sealed trait HInnerElement extends HoconPsiElement { type Parent <: HoconPsiElement } final class HObjectEntries(ast: ASTNode) extends HoconPsiElement(ast) with HScope { def forParent[T](forFile: HoconPsiFile => T, forObject: HObject => T): T = (parent: @unchecked) match { case Some(file: HoconPsiFile) => forFile(file) case Some(obj: HObject) => forObject(obj) } def isToplevel: Boolean = forParent(_ => true, _ => false) def prefixingField: Option[HValuedField] = forParent(_ => None, obj => obj.prefixingField) def entries: Iterator[HObjectEntry] = findChildren[HObjectEntry] def objectFields: Iterator[HObjectField] = findChildren[HObjectField] def includes: Iterator[HInclude] = findChildren[HInclude] def directKeyedFields: Iterator[HKeyedField] = objectFields.flatMap(_.directKeyedFields) } sealed trait HObjectEntry extends HoconPsiElement with HInnerElement { type Parent = HObjectEntries def previousEntry: Option[HObjectEntry] = prevSiblings.collectFirst({ 
case e: HObjectEntry => e }) def nextEntry: Option[HObjectEntry] = nextSiblings.collectFirst({ case e: HObjectEntry => e }) } final class HObjectField(ast: ASTNode) extends HoconPsiElement(ast) with HObjectEntry with HScope { def docComments: Iterator[PsiComment] = nonWhitespaceChildren .takeWhile(_.getNode.getElementType == HoconTokenType.HashComment) .map(ch => ch.asInstanceOf[PsiComment]) def keyedField: HKeyedField = getChild[HKeyedField] def endingValue: Option[HValue] = keyedField.endingField.endingValue def directKeyedFields = Iterator(keyedField) // there may be bound comments and text offset should be at the beginning of path override def getTextOffset: Int = keyedField.getTextOffset } sealed trait HKeyedField extends HoconPsiElement with HInnerElement with HScope { def forParent[T](forKeyedParent: HKeyedField => T, forObjectField: HObjectField => T): T = (parent: @unchecked) match { case Some(kf: HKeyedField) => forKeyedParent(kf) case Some(of: HObjectField) => forObjectField(of) } def key: Option[HKey] = findChild[HKey] def validKey: Option[HKey] = key.filter(_.isValidKey) /** * Goes up the tree in order to determine full path under which this keyed field is defined. * Stops when encounters file-toplevel entries or an array (including array-append field). * * @return stream of all encountered keyed fields (in bottom-up order, i.e. starting with itself) */ def fieldsInAllPathsBackward: Stream[HKeyedField] = this #:: forParent( keyedField => keyedField.fieldsInAllPathsBackward, objectField => objectField.parent.map(_.forParent( _ => Stream.empty, obj => obj.prefixingField.map(_.fieldsInAllPathsBackward).getOrElse(Stream.empty) )).get ) /** * Like [[fieldsInAllPathsBackward]] but returns [[HKey]]s instead of [[HKeyedField]]s, in reverse order (i.e. key * from this field is at the end) and ensures that all keys are valid. If not, [[None]] is returned. */ def keysInAllPaths: Option[List[HKey]] = { def iterate(str: Stream[HKeyedField], acc: List[HKey]): Option[List[HKey]] = str match { case head #:: tail => head.validKey.flatMap(key => iterate(tail, key :: acc)) case _ => Some(acc) } iterate(fieldsInAllPathsBackward, Nil) } def enclosingObjectField: HObjectField = forParent(keyedParent => keyedParent.enclosingObjectField, objectField => objectField) def enclosingEntries: HObjectEntries = enclosingObjectField.parent.get def fieldsInPathForward: Stream[HKeyedField] def fieldsInPathBackward: Stream[HKeyedField] = forParent(keyedField => this #:: keyedField.fieldsInPathBackward, _ => Stream(this)) def startingField: HKeyedField = forParent(_.startingField, _ => this) def endingField: HValuedField def endingValue: Option[HValue] = endingField.value /** * Scopes present in whatever is on the right side of key in that keyed field. 
   */
  def subScopes: Iterator[HScope]

  def directKeyedFields = Iterator(this)
}

final class HPrefixedField(ast: ASTNode) extends HoconPsiElement(ast) with HKeyedField {
  def subField: HKeyedField = getChild[HKeyedField]

  def fieldsInPathForward: Stream[HKeyedField] = this #:: subField.fieldsInPathForward

  def endingField: HValuedField = subField.endingField

  def subScopes = Iterator(subField)
}

final class HValuedField(ast: ASTNode) extends HoconPsiElement(ast) with HKeyedField {
  def value: Option[HValue] = findChild[HValue]

  def isArrayAppend: Boolean = separator.contains(HoconTokenType.PlusEquals)

  def separator: Option[HoconTokenType] =
    Option(findChildByType[PsiElement](HoconTokenSets.KeyValueSeparator))
      .map(_.getNode.getElementType.asInstanceOf[HoconTokenType])

  def fieldsInPathForward: Stream[HKeyedField] = Stream(this)

  def endingField: HValuedField = this

  def subScopes: Iterator[HObject] =
    if (isArrayAppend) Iterator.empty
    else value.collect {
      case obj: HObject => Iterator(obj)
      case conc: HConcatenation => conc.findChildren[HObject]
    }.getOrElse(Iterator.empty)
}

final class HInclude(ast: ASTNode) extends HoconPsiElement(ast) with HObjectEntry {
  def included: HQualifiedIncluded = getChild[HQualifiedIncluded]

  // there may be bound comments and text offset should be on 'include' keyword
  override def getTextOffset: Int =
    allChildren.find(_.getNode.getElementType == HoconTokenType.UnquotedChars)
      .map(_.getTextOffset).getOrElse(super.getTextOffset)
}

final class HIncluded(ast: ASTNode) extends HoconPsiElement(ast) with HInnerElement {
  type Parent = HInclude

  def required: Boolean =
    getFirstChild.getNode.getElementType == HoconTokenType.UnquotedChars &&
      getFirstChild.getText == HoconConstants.RequiredModifer

  def qualified: Option[HQualifiedIncluded] = findChild[HQualifiedIncluded]

  def target: Option[HIncludeTarget] = qualified.flatMap(_.target)
}

final class HQualifiedIncluded(ast: ASTNode) extends HoconPsiElement(ast) with HInnerElement {
  type Parent = HIncluded

  def qualifier: Option[String] = getFirstChild.getNode.getElementType match {
    case HoconTokenType.UnquotedChars => Some(getFirstChild.getText)
    case _ => None
  }

  def target: Option[HIncludeTarget] = findChild[HIncludeTarget]

  def fileReferenceSet: Option[IncludedFileReferenceSet] =
    for {
      hs <- target
      vf <- Option(getContainingFile.getOriginalFile.getVirtualFile)
      rs <- {
        val strVal = hs.stringValue
        val (absolute, forcedAbsolute, fromClasspath) = qualifier match {
          case Some(ClasspathModifier) => (true, true, true)
          case None if !isValidUrl(strVal) =>
            val pfi = ProjectRootManager.getInstance(getProject).getFileIndex
            val fromClasspath = pfi.isInSource(vf) || pfi.isInLibraryClasses(vf)
            (strVal.trim.startsWith("/"), false, fromClasspath)
          case _ => (true, true, false)
        }
        // Include resolution is enabled for:
        // - classpath() includes anywhere
        // - unqualified includes in classpath (source or library) files
        // - relative unqualified includes in non-classpath files
        if (!absolute || fromClasspath)
          Some(new IncludedFileReferenceSet(strVal, hs, forcedAbsolute, fromClasspath))
        else None
      }
    } yield rs
}

final class HKey(ast: ASTNode) extends HoconPsiElement(ast) with HInnerElement {
  def forParent[T](forPath: HPath => T, forKeyedField: HKeyedField => T): T =
    (parent: @unchecked) match {
      case Some(path: HPath) => forPath(path)
      case Some(keyedField: HKeyedField) => forKeyedField(keyedField)
    }

  def allKeysFromToplevel: Option[List[HKey]] = forParent(
    path => path.allKeys,
    keyedEntry => keyedEntry.keysInAllPaths
  )

  def enclosingEntries: HObjectEntries = forParent(
    _ => getContainingFile.toplevelEntries,
    keyedField => keyedField.enclosingEntries
  )

  def stringValue: String = allChildren.collect {
    case keyPart: HKeyPart => keyPart.stringValue
    case other => other.getText
  }.mkString

  def keyParts: Iterator[HKeyPart] = findChildren[HKeyPart]

  def isValidKey: Boolean = findChild[PsiErrorElement].isEmpty

  override def getReference = new HKeySelfReference(this)
}

final class HPath(ast: ASTNode) extends HoconPsiElement(ast) with HInnerElement {
  def forParent[T](forPath: HPath => T, forSubstitution: HSubstitution => T): T =
    (parent: @unchecked) match {
      case Some(path: HPath) => forPath(path)
      case Some(subst: HSubstitution) => forSubstitution(subst)
    }

  def allPaths: List[HPath] = {
    def allPathsIn(path: HPath, acc: List[HPath]): List[HPath] =
      path.prefix.map(prePath => allPathsIn(prePath, path :: acc)).getOrElse(path :: acc)
    allPathsIn(this, Nil)
  }

  /**
   * Some(all keys in this path) or None if there's an invalid key in path.
   */
  def allKeys: Option[List[HKey]] = {
    def allKeysIn(path: HPath, acc: List[HKey]): Option[List[HKey]] =
      path.validKey.flatMap(key => path.prefix
        .map(prePath => allKeysIn(prePath, key :: acc))
        .getOrElse(Some(key :: acc)))
    allKeysIn(this, Nil)
  }

  /**
   * If all keys are valid - all keys of this path.
   * If some keys are invalid - all valid keys from left to right until some invalid key is encountered
   * (i.e. longest valid prefix path)
   */
  def startingValidKeys: List[HKey] =
    allPaths.iterator.takeWhile(_.validKey.nonEmpty).flatMap(_.validKey).toList

  def startingPath: HPath = prefix.map(_.startingPath).getOrElse(this)

  def prefix: Option[HPath] = findChild[HPath]

  def validKey: Option[HKey] = findChild[HKey].filter(_.isValidKey)
}

sealed trait HValue extends HoconPsiElement with HInnerElement {
  def forParent[T](forValuedField: HValuedField => Option[T], forArray: HArray => Option[T],
    forConcatenation: HConcatenation => Option[T]): Option[T] =
    parent match {
      case Some(vf: HValuedField) => forValuedField(vf)
      case Some(arr: HArray) => forArray(arr)
      case Some(conc: HConcatenation) => forConcatenation(conc)
      case _ => None
    }

  def prefixingField: Option[HValuedField] = forParent(
    vf => if (vf.isArrayAppend) None else Some(vf),
    _ => None,
    concat => concat.prefixingField
  )
}

final class HObject(ast: ASTNode) extends HoconPsiElement(ast) with HValue with HScope {
  def entries: HObjectEntries = getChild[HObjectEntries]

  def directKeyedFields: Iterator[HKeyedField] = entries.directKeyedFields
}

final class HArray(ast: ASTNode) extends HoconPsiElement(ast) with HValue

final class HSubstitution(ast: ASTNode) extends HoconPsiElement(ast) with HValue {
  def path: Option[HPath] = findChild[HPath]
}

final class HConcatenation(ast: ASTNode) extends HoconPsiElement(ast) with HValue

sealed trait HLiteralValue extends HValue with PsiLiteralValue

final class HNull(ast: ASTNode) extends HoconPsiElement(ast) with HLiteralValue {
  def getValue: Object = null
}

final class HBoolean(ast: ASTNode) extends HoconPsiElement(ast) with HLiteralValue {
  def getValue: Object = jl.Boolean.valueOf(booleanValue)

  def booleanValue: Boolean = getText.toBoolean
}

final class HNumber(ast: ASTNode) extends HoconPsiElement(ast) with HLiteralValue {
  def getValue: Object = numberValue

  def numberValue: jl.Number =
    if (getText.exists(HNumber.DecimalIndicators.contains))
      jl.Double.parseDouble(getText)
    else
      jl.Long.parseLong(getText)
}

object HNumber {
  private final val DecimalIndicators = Set('.', 'e', 'E')
}

final class HUnquotedString(ast: ASTNode) extends HoconPsiElement(ast)

sealed trait HString extends HInnerElement with PsiLiteralValue with ContributedReferenceHost {
  def stringType: IElementType = getFirstChild.getNode.getElementType

  def getValue: Object = stringValue

  def unquote: String = stringType match {
    case HoconTokenType.QuotedString =>
      getText.substring(1, getText.length - (if (isClosed) 1 else 0))
    case HoconTokenType.MultilineString =>
      getText.substring(3, getText.length - (if (isClosed) 3 else 0))
    case HoconElementType.UnquotedString =>
      getText
  }

  def stringValue: String = stringType match {
    case HoconTokenType.QuotedString => StringUtil.unescapeStringCharacters(unquote)
    case _ => unquote
  }

  def isClosed: Boolean = stringType match {
    case HoconTokenType.QuotedString =>
      HoconConstants.ProperlyClosedQuotedString.pattern.matcher(getText).matches
    case HoconTokenType.MultilineString =>
      getText.endsWith("\"\"\"")
    case _ => true
  }

  override def getReferences: Array[PsiReference] =
    PsiReferenceService.getService.getContributedReferences(this)
}

final class HStringValue(ast: ASTNode) extends HoconPsiElement(ast) with HString with HLiteralValue

final class HKeyPart(ast: ASTNode) extends HoconPsiElement(ast) with HString {
  type Parent = HKey
}

final class HIncludeTarget(ast: ASTNode) extends HoconPsiElement(ast) with HString {
  type Parent = HQualifiedIncluded

  def getFileReferences: Array[FileReference] =
    parent.flatMap(_.fileReferenceSet.map(_.getAllReferences)).getOrElse(FileReference.EMPTY)
}
ghik/intellij-hocon
src/org/jetbrains/plugins/hocon/psi/HoconPsiElement.scala
Scala
apache-2.0
16,042
package io.getquill.oracle

import io.getquill.{ Prefix, ZioSpec }
import zio.{ Task, ZIO }
import io.getquill.context.ZioJdbc._

class ZioJdbcContextSpec extends ZioSpec {

  def prefix = Prefix("testOracleDB")
  val context = testContext
  import testContext._

  "provides transaction support" - {
    "success" in {
      (for {
        _ <- testContext.run(qr1.delete)
        _ <- testContext.transaction {
          testContext.run(qr1.insert(_.i -> 33))
        }
        r <- testContext.run(qr1)
      } yield r).runSyncUnsafe().map(_.i) mustEqual List(33)
    }

    "success - stream" in {
      (for {
        _ <- testContext.run(qr1.delete)
        seq <- testContext.transaction {
          for {
            _ <- testContext.run(qr1.insert(_.i -> 33))
            s <- accumulateDS(testContext.stream(qr1))
          } yield s
        }
        r <- testContext.run(qr1)
      } yield (seq.map(_.i), r.map(_.i))).runSyncUnsafe() mustEqual ((List(33), List(33)))
    }

    "failure" in {
      (for {
        _ <- testContext.run(qr1.delete)
        e <- testContext.underlying.transaction {
          import testContext.underlying._
          ZIO.collectAll(Seq(
            testContext.underlying.run(qr1.insert(_.i -> 18)),
            Task { throw new IllegalStateException }
          ))
        }.catchSome {
          case e: Exception => Task(e.getClass.getSimpleName)
        }.onDataSource
        r <- testContext.run(qr1)
      } yield (e, r.isEmpty)).runSyncUnsafe() mustEqual (("IllegalStateException", true))
    }

    "nested" in {
      (for {
        _ <- testContext.run(qr1.delete)
        _ <- testContext.underlying.transaction {
          import testContext.underlying._
          testContext.underlying.transaction {
            testContext.underlying.run(qr1.insert(_.i -> 33))
          }
        }.onDataSource
        r <- testContext.run(qr1)
      } yield r).runSyncUnsafe().map(_.i) mustEqual List(33)
    }

    "prepare" in {
      testContext.underlying.prepareParams(
        "select * from Person where name=? and age > ?", (ps, _) => (List("Sarah", 127), ps)
      ).onDataSource.runSyncUnsafe() mustEqual List("127", "'Sarah'")
    }
  }
}
getquill/quill
quill-jdbc-zio/src/test/scala/io/getquill/oracle/ZioJdbcContextSpec.scala
Scala
apache-2.0
2,218
package com.avsystem.commons
package macros.rpc

import com.avsystem.commons.macros.meta.MacroMetadatas
import com.avsystem.commons.macros.misc.{FailMsg, Ok, Res}

private[commons] trait RpcMetadatas extends MacroMetadatas { this: RpcMacroCommons with RpcSymbols with RpcMappings =>

  import c.universe._

  class MethodMetadataParam(owner: MetadataConstructor, symbol: Symbol)
    extends MetadataParam(owner, symbol) with RealMethodTarget with ArityParam {

    def allowSingle: Boolean = true
    def allowOptional: Boolean = true
    def allowNamedMulti: Boolean = true
    def allowListedMulti: Boolean = true

    def baseTagSpecs: List[BaseTagSpec] = tagSpecs(MethodTagAT)

    if (!(typeGivenInstances <:< TypedMetadataType)) {
      reportProblem(s"method metadata type must be a subtype of TypedMetadata[_]")
    }

    def mappingFor(matchedMethod: MatchedMethod): Res[MethodMetadataMapping] = for {
      mdType <- actualMetadataType(typeGivenInstances, matchedMethod.real.resultType, "method result type", verbatimResult)
      tree <- materializeOneOf(mdType) { t =>
        val constructor = new MethodMetadataConstructor(t, this, this)
        for {
          newMatchedMethod <- constructor.matchTagsAndFilters(matchedMethod)
          paramMappings <- constructor.paramMappings(newMatchedMethod)
          typeParamMappings <- constructor.typeParamMappings(newMatchedMethod)
          tree <- constructor.tryMaterializeFor(newMatchedMethod, paramMappings, typeParamMappings)
        } yield {
          val tparams = matchedMethod.typeParamsInContext
          val res = withTypeParamInstances(tparams)(tree)
          q"..${matchedMethod.real.typeParamDecls}; ${stripTparamRefs(tparams.map(_.symbol))(res)}"
        }
      }
    } yield MethodMetadataMapping(matchedMethod, this, tree)

    def allowImplicitDepParams: Boolean = false
  }

  class TypeParamMetadataParam(owner: MetadataConstructor, symbol: Symbol)
    extends MetadataParam(owner, symbol) with RealTypeParamTarget {

    def baseTagSpecs: List[BaseTagSpec] = Nil //TODO: introduce `typeParamTag` just for the sake of consistency?

    private def metadataTree(matched: MatchedSymbol, realParam: RealTypeParam, indexInRaw: Int): Option[Res[Tree]] =
      matchRealTypeParam(matched, realParam, indexInRaw).toOption.map(metadataTree(_, indexInRaw))

    private def metadataTree(matchedParam: MatchedTypeParam, indexInRaw: Int): Res[Tree] = {
      val realParam = matchedParam.real
      val result = for {
        tree <- materializeOneOf(arity.collectedType) { t =>
          val constructor = new TypeParamMetadataConstructor(t, this, this, indexInRaw)
          for {
            newMatchedParam <- constructor.matchTagsAndFilters(matchedParam)
            tree <- constructor.tryMaterializeFor(newMatchedParam)
          } yield tree
        }
      } yield tree
      result.mapFailure(msg => s"${realParam.problemStr}:\n$msg")
    }

    def metadataFor(matched: MatchedSymbol, parser: ParamsParser[RealTypeParam]): Res[Tree] = arity match {
      case _: ParamArity.Single =>
        val errorMessage = unmatchedError.getOrElse(s"$shortDescription $pathStr was not matched by any real type parameter")
        parser.extractSingle(!auxiliary, metadataTree(matched, _, 0), errorMessage)
      case _: ParamArity.Optional =>
        Ok(mkOptional(parser.extractOptional(!auxiliary, metadataTree(matched, _, 0))))
      case ParamArity.Multi(_, true) =>
        parser.extractMulti(!auxiliary, (rp, i) => matchRealTypeParam(matched, rp, i)
          .toOption.map(mp => metadataTree(mp, i).map(t => q"(${mp.rawName}, $t)"))).map(mkMulti(_))
      case _: ParamArity.Multi =>
        parser.extractMulti(!auxiliary, metadataTree(matched, _, _)).map(mkMulti(_))
    }
  }

  class ParamMetadataParam(owner: MetadataConstructor, symbol: Symbol)
    extends MetadataParam(owner, symbol) with RealParamTarget {

    def baseTagSpecs: List[BaseTagSpec] = tagSpecs(ParamTagAT)

    if (!(typeGivenInstances <:< TypedMetadataType)) {
      reportProblem(s"type $typeGivenInstances is not a subtype of TypedMetadata[_]")
    }

    private def metadataTree(matchedMethod: MatchedMethod, realParam: RealParam, indexInRaw: Int): Option[Res[Tree]] =
      matchRealParam(matchedMethod, realParam, indexInRaw).toOption.map(metadataTree(_, indexInRaw))

    private def metadataTree(matchedParam: MatchedParam, indexInRaw: Int): Res[Tree] = {
      val realParam = matchedParam.real
      val result = for {
        mdType <- actualMetadataType(typeGivenInstances, realParam.nonOptionalType, "parameter type", verbatim)
        tree <- materializeOneOf(mdType) { t =>
          val constructor = new ParamMetadataConstructor(t, this, this, indexInRaw)
          for {
            newMatchedParam <- constructor.matchTagsAndFilters(matchedParam)
            tree <- constructor.tryMaterializeFor(newMatchedParam)
          } yield tree
        }
      } yield tree
      result.mapFailure(msg => s"${realParam.problemStr}:\n$msg")
    }

    def metadataFor(matchedMethod: MatchedMethod, parser: ParamsParser[RealParam]): Res[Tree] = {
      val res = arity match {
        case _: ParamArity.Single =>
          val errorMessage = unmatchedError.getOrElse(s"$shortDescription $pathStr was not matched by any real parameter")
          parser.extractSingle(!auxiliary, metadataTree(matchedMethod, _, 0), errorMessage)
        case _: ParamArity.Optional =>
          Ok(mkOptional(parser.extractOptional(!auxiliary, metadataTree(matchedMethod, _, 0))))
        case ParamArity.Multi(_, true) =>
          parser.extractMulti(!auxiliary, (rp, i) => matchRealParam(matchedMethod, rp, i)
            .toOption.map(mp => metadataTree(mp, i).map(t => q"(${mp.rawName}, $t)"))).map(mkMulti(_))
        case _: ParamArity.Multi =>
          parser.extractMulti(!auxiliary, metadataTree(matchedMethod, _, _)).map(mkMulti(_))
      }
      res.map(withTypeParamInstances(matchedMethod.typeParamsInContext))
    }
  }

  case class MethodMetadataMapping(matchedMethod: MatchedMethod, mdParam: MethodMetadataParam, tree: Tree) {
    def collectedTree(named: Boolean): Tree =
      if (named) q"(${matchedMethod.rawName}, $tree)" else tree
  }

  class RpcApiMetadataConstructor(constructed: Type, ownerParam: Option[MetadataParam])
    extends MetadataConstructor(constructed, ownerParam) with TagMatchingSymbol {

    def baseTagSpecs: List[BaseTagSpec] = Nil

    lazy val typeParamMdParams: List[TypeParamMetadataParam] = collectParams[TypeParamMetadataParam]
    lazy val methodMdParams: List[MethodMetadataParam] = collectParams[MethodMetadataParam]

    def abstractsTypeParams: Boolean = typeParamMdParams.nonEmpty

    override def paramByStrategy(paramSym: Symbol, annot: Annot, ownerConstr: MetadataConstructor): MetadataParam =
      if (annot.tpe <:< RpcMethodMetadataAT) new MethodMetadataParam(ownerConstr, paramSym)
      else if (annot.tpe <:< RpcTypeParamMetadataAT) new TypeParamMetadataParam(ownerConstr, paramSym)
      else super.paramByStrategy(paramSym, annot, ownerConstr)

    def compositeConstructor(param: CompositeParam): MetadataConstructor =
      new RpcApiMetadataConstructor(param.collectedType, Some(param))

    def typeParamMappings(matched: RealRpcApi): Res[Map[TypeParamMetadataParam, Tree]] =
      collectParamMappings(matched.typeParams, typeParamMdParams, allowIncomplete)(
        (param, parser) => param.metadataFor(matched, parser).map(t => (param, t)),
        rp => s"no metadata parameter was found that would match ${rp.shortDescription} ${rp.nameStr}"
      ).map(_.toMap)

    def tryMaterializeFor(rpc: RealRpcApi): Res[Tree] =
      typeParamMappings(rpc).flatMap { tpMappings =>
        val errorBase = unmatchedError.getOrElse(s"cannot materialize ${constructed.typeSymbol} for $rpc")
        val methodMappings = collectMethodMappings(
          methodMdParams, errorBase, rpc.realMethods, allowIncomplete
        )(_.mappingFor(_)).groupBy(_.mdParam)

        tryMaterialize(rpc) {
          case tpmp: TypeParamMetadataParam => Ok(tpMappings(tpmp))
          case mmp: MethodMetadataParam =>
            val mappings = methodMappings.getOrElse(mmp, Nil)
            mmp.arity match {
              case ParamArity.Single(_) => mappings match {
                case Nil => FailMsg(s"no real method found that would match ${mmp.description}")
                case List(m) => Ok(m.tree)
                case _ => FailMsg(s"multiple real methods match ${mmp.description}")
              }
              case ParamArity.Optional(_) => mappings match {
                case Nil => Ok(mmp.mkOptional[Tree](None))
                case List(m) => Ok(mmp.mkOptional(Some(m.tree)))
                case _ => FailMsg(s"multiple real methods match ${mmp.description}")
              }
              case ParamArity.Multi(_, named) =>
                Ok(mmp.mkMulti(mappings.map(_.collectedTree(named))))
              case arity =>
                FailMsg(s"${arity.annotStr} not allowed on method metadata params")
            }
        }
      }
  }

  class MethodMetadataConstructor(
    constructed: Type,
    val containingMethodParam: MethodMetadataParam,
    owner: MetadataParam
  ) extends MetadataConstructor(constructed, Some(owner)) {

    override def inheritFrom: Option[TagMatchingSymbol] = Some(containingMethodParam)

    def baseTagSpecs: List[BaseTagSpec] = tagSpecs(MethodTagAT)

    lazy val paramMdParams: List[ParamMetadataParam] = collectParams[ParamMetadataParam]
    lazy val typeParamMdParams: List[TypeParamMetadataParam] = collectParams[TypeParamMetadataParam]

    override def paramByStrategy(paramSym: Symbol, annot: Annot, ownerConstr: MetadataConstructor = this): MetadataParam =
      if (annot.tpe <:< ReifyParamListCountAT) new ParamListCountParam(ownerConstr, paramSym)
      else if (annot.tpe <:< ReifyPositionAT) new MethodPositionParam(ownerConstr, paramSym)
      else if (annot.tpe <:< ReifyFlagsAT) new MethodFlagsParam(ownerConstr, paramSym)
      else if (annot.tpe <:< RpcParamMetadataAT) new ParamMetadataParam(ownerConstr, paramSym)
      else if (annot.tpe <:< RpcTypeParamMetadataAT) new TypeParamMetadataParam(ownerConstr, paramSym)
      else super.paramByStrategy(paramSym, annot, ownerConstr)

    def compositeConstructor(param: CompositeParam): MetadataConstructor =
      new MethodMetadataConstructor(param.collectedType, containingMethodParam, param)

    def paramMappings(matchedMethod: MatchedMethod): Res[Map[ParamMetadataParam, Tree]] =
      collectParamMappings(matchedMethod.real.realParams, paramMdParams, allowIncomplete)(
        (param, parser) => param.metadataFor(matchedMethod, parser).map(t => (param, t)),
        rp => containingMethodParam.errorForUnmatchedParam(rp).getOrElse(
          s"no metadata parameter was found that would match ${rp.shortDescription} ${rp.nameStr}")
      ).map(_.toMap)

    def typeParamMappings(matchedMethod: MatchedMethod): Res[Map[TypeParamMetadataParam, Tree]] =
      collectParamMappings(matchedMethod.real.typeParams, typeParamMdParams, allowIncomplete)(
        (param, parser) => param.metadataFor(matchedMethod, parser).map(t => (param, t)),
        rp => containingMethodParam.errorForUnmatchedParam(rp).getOrElse(
          s"no metadata parameter was found that would match ${rp.shortDescription} ${rp.nameStr}")
      ).map(_.toMap)

    def tryMaterializeFor(
      matchedMethod: MatchedMethod,
      paramMappings: Map[ParamMetadataParam, Tree],
      typeParamMappings: Map[TypeParamMetadataParam, Tree]
    ): Res[Tree] =
      tryMaterialize(matchedMethod) {
        case pmp: ParamMetadataParam => Ok(paramMappings(pmp))
        case tpmp: TypeParamMetadataParam => Ok(typeParamMappings(tpmp))
      }
  }

  class TypeParamMetadataConstructor(
    constructed: Type,
    containingTypeParamMdParam: TypeParamMetadataParam,
    owner: MetadataParam,
    val indexInRaw: Int
  ) extends MetadataConstructor(constructed, Some(owner)) {

    override def inheritFrom: Option[TagMatchingSymbol] = Some(containingTypeParamMdParam)

    def baseTagSpecs: List[BaseTagSpec] = Nil

    override def paramByStrategy(paramSym: Symbol, annot: Annot, ownerConstr: MetadataConstructor): MetadataParam =
      annot.tpe match {
        // TODO: metadata for lower/upper bound or something?
        case _ => super.paramByStrategy(paramSym, annot, ownerConstr)
      }

    def compositeConstructor(param: CompositeParam): MetadataConstructor =
      new TypeParamMetadataConstructor(param.collectedType, containingTypeParamMdParam, param, indexInRaw)

    def tryMaterializeFor(matchedTypeParam: MatchedTypeParam): Res[Tree] =
      tryMaterialize(matchedTypeParam)(p => FailMsg(s"unexpected metadata parameter $p"))
  }

  class ParamMetadataConstructor(
    constructed: Type,
    containingParamMdParam: ParamMetadataParam,
    owner: MetadataParam,
    val indexInRaw: Int
  ) extends MetadataConstructor(constructed, Some(owner)) {

    override def inheritFrom: Option[TagMatchingSymbol] = Some(containingParamMdParam)

    def baseTagSpecs: List[BaseTagSpec] = tagSpecs(ParamTagAT)

    override def paramByStrategy(paramSym: Symbol, annot: Annot, ownerConstr: MetadataConstructor): MetadataParam =
      annot.tpe match {
        case t if t <:< ReifyPositionAT => new ParamPositionParam(ownerConstr, paramSym)
        case t if t <:< ReifyFlagsAT => new ParamFlagsParam(ownerConstr, paramSym)
        case _ => super.paramByStrategy(paramSym, annot, ownerConstr)
      }

    def compositeConstructor(param: CompositeParam): MetadataConstructor =
      new ParamMetadataConstructor(param.typeGivenInstances, containingParamMdParam, param, indexInRaw)

    def tryMaterializeFor(matchedParam: MatchedParam): Res[Tree] =
      tryMaterialize(matchedParam)(p => FailMsg(s"unexpected metadata parameter $p"))
  }

  class ParamListCountParam(owner: MetadataConstructor, symbol: Symbol)
    extends DirectMetadataParam(owner, symbol) {

    if (!(actualType =:= definitions.IntTpe)) {
      reportProblem("its type is not Int")
    }

    def tryMaterializeFor(matchedSymbol: MatchedSymbol): Res[Tree] =
      Ok(q"${matchedSymbol.real.symbol.asMethod.paramLists.length}")
  }

  class MethodPositionParam(owner: MetadataConstructor, symbol: Symbol)
    extends DirectMetadataParam(owner, symbol) {

    def tryMaterializeFor(matchedSymbol: MatchedSymbol): Res[Tree] =
      Ok(q"$MethodPositionObj(${matchedSymbol.index}, ${matchedSymbol.indexInRaw})")
  }

  class MethodFlagsParam(owner: MetadataConstructor, symbol: Symbol)
    extends DirectMetadataParam(owner, symbol) {

    if (!(actualType =:= MethodFlagsTpe)) {
      reportProblem("its type is not MethodFlags")
    }

    def tryMaterializeFor(matchedParam: MatchedSymbol): Res[Tree] = Ok {
      val rpcSym = matchedParam.real
      def flag(cond: Boolean, bit: Int) = if (cond) 1 << bit else 0
      val s = rpcSym.symbol.asTerm
      val rawFlags =
        flag(s.isAbstract, 0) |
          flag(s.isFinal, 1) |
          flag(s.isLazy, 2) |
          flag(s.isGetter, 3) |
          flag(s.isSetter, 4) |
          flag(s.isVar, 5)
      q"new $MethodFlagsTpe($rawFlags)"
    }
  }
}
AVSystem/scala-commons
commons-macros/src/main/scala/com/avsystem/commons/macros/rpc/RpcMetadatas.scala
Scala
mit
15,130
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.storage

import scala.collection.generic.CanBuildFrom
import scala.collection.immutable.Iterable
import scala.concurrent.Future

import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.internal.Logging
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.storage.BlockManagerMessages._
import org.apache.spark.util.{RpcUtils, ThreadUtils}

private[spark] class BlockManagerMaster(
    var driverEndpoint: RpcEndpointRef,
    var driverHeartbeatEndPoint: RpcEndpointRef,
    conf: SparkConf,
    isDriver: Boolean)
  extends Logging {

  val timeout = RpcUtils.askRpcTimeout(conf)

  /** Remove a dead executor from the driver endpoint. This is only called on the driver side. */
  def removeExecutor(execId: String): Unit = {
    tell(RemoveExecutor(execId))
    logInfo("Removed " + execId + " successfully in removeExecutor")
  }

  /** Decommission block managers corresponding to given set of executors
   * Non-blocking.
   */
  def decommissionBlockManagers(executorIds: Seq[String]): Unit = {
    driverEndpoint.ask[Boolean](DecommissionBlockManagers(executorIds))
  }

  /** Get Replication Info for all the RDD blocks stored in given blockManagerId */
  def getReplicateInfoForRDDBlocks(blockManagerId: BlockManagerId): Seq[ReplicateBlock] = {
    driverEndpoint.askSync[Seq[ReplicateBlock]](GetReplicateInfoForRDDBlocks(blockManagerId))
  }

  /** Request removal of a dead executor from the driver endpoint.
   * This is only called on the driver side. Non-blocking
   */
  def removeExecutorAsync(execId: String): Unit = {
    driverEndpoint.ask[Boolean](RemoveExecutor(execId))
    logInfo("Removal of executor " + execId + " requested")
  }

  /**
   * Register the BlockManager's id with the driver. The input BlockManagerId does not contain
   * topology information. This information is obtained from the master and we respond with an
   * updated BlockManagerId fleshed out with this information.
   */
  def registerBlockManager(
      id: BlockManagerId,
      localDirs: Array[String],
      maxOnHeapMemSize: Long,
      maxOffHeapMemSize: Long,
      storageEndpoint: RpcEndpointRef): BlockManagerId = {
    logInfo(s"Registering BlockManager $id")
    val updatedId = driverEndpoint.askSync[BlockManagerId](
      RegisterBlockManager(id, localDirs, maxOnHeapMemSize, maxOffHeapMemSize, storageEndpoint))
    logInfo(s"Registered BlockManager $updatedId")
    updatedId
  }

  def updateBlockInfo(
      blockManagerId: BlockManagerId,
      blockId: BlockId,
      storageLevel: StorageLevel,
      memSize: Long,
      diskSize: Long): Boolean = {
    val res = driverEndpoint.askSync[Boolean](
      UpdateBlockInfo(blockManagerId, blockId, storageLevel, memSize, diskSize))
    logDebug(s"Updated info of block $blockId")
    res
  }

  /** Get locations of the blockId from the driver */
  def getLocations(blockId: BlockId): Seq[BlockManagerId] = {
    driverEndpoint.askSync[Seq[BlockManagerId]](GetLocations(blockId))
  }

  /** Get locations as well as status of the blockId from the driver */
  def getLocationsAndStatus(
      blockId: BlockId,
      requesterHost: String): Option[BlockLocationsAndStatus] = {
    driverEndpoint.askSync[Option[BlockLocationsAndStatus]](
      GetLocationsAndStatus(blockId, requesterHost))
  }

  /** Get locations of multiple blockIds from the driver */
  def getLocations(blockIds: Array[BlockId]): IndexedSeq[Seq[BlockManagerId]] = {
    driverEndpoint.askSync[IndexedSeq[Seq[BlockManagerId]]](
      GetLocationsMultipleBlockIds(blockIds))
  }

  /**
   * Check if block manager master has a block. Note that this can be used to check for only
   * those blocks that are reported to block manager master.
   */
  def contains(blockId: BlockId): Boolean = {
    !getLocations(blockId).isEmpty
  }

  /** Get ids of other nodes in the cluster from the driver */
  def getPeers(blockManagerId: BlockManagerId): Seq[BlockManagerId] = {
    driverEndpoint.askSync[Seq[BlockManagerId]](GetPeers(blockManagerId))
  }

  /**
   * Get a list of unique shuffle service locations where an executor is successfully
   * registered in the past for block push/merge with push based shuffle.
   */
  def getShufflePushMergerLocations(
      numMergersNeeded: Int,
      hostsToFilter: Set[String]): Seq[BlockManagerId] = {
    driverEndpoint.askSync[Seq[BlockManagerId]](
      GetShufflePushMergerLocations(numMergersNeeded, hostsToFilter))
  }

  /**
   * Remove the host from the candidate list of shuffle push mergers. This can be
   * triggered if there is a FetchFailedException on the host
   * @param host
   */
  def removeShufflePushMergerLocation(host: String): Unit = {
    driverEndpoint.askSync[Seq[BlockManagerId]](RemoveShufflePushMergerLocation(host))
  }

  def getExecutorEndpointRef(executorId: String): Option[RpcEndpointRef] = {
    driverEndpoint.askSync[Option[RpcEndpointRef]](GetExecutorEndpointRef(executorId))
  }

  /**
   * Remove a block from the storage endpoints that have it. This can only be used to remove
   * blocks that the driver knows about.
   */
  def removeBlock(blockId: BlockId): Unit = {
    driverEndpoint.askSync[Boolean](RemoveBlock(blockId))
  }

  /** Remove all blocks belonging to the given RDD. */
  def removeRdd(rddId: Int, blocking: Boolean): Unit = {
    val future = driverEndpoint.askSync[Future[Seq[Int]]](RemoveRdd(rddId))
    future.failed.foreach(e =>
      logWarning(s"Failed to remove RDD $rddId - ${e.getMessage}", e)
    )(ThreadUtils.sameThread)
    if (blocking) {
      // the underlying Futures will timeout anyway, so it's safe to use infinite timeout here
      RpcUtils.INFINITE_TIMEOUT.awaitResult(future)
    }
  }

  /** Remove all blocks belonging to the given shuffle. */
  def removeShuffle(shuffleId: Int, blocking: Boolean): Unit = {
    val future = driverEndpoint.askSync[Future[Seq[Boolean]]](RemoveShuffle(shuffleId))
    future.failed.foreach(e =>
      logWarning(s"Failed to remove shuffle $shuffleId - ${e.getMessage}", e)
    )(ThreadUtils.sameThread)
    if (blocking) {
      // the underlying Futures will timeout anyway, so it's safe to use infinite timeout here
      RpcUtils.INFINITE_TIMEOUT.awaitResult(future)
    }
  }

  /** Remove all blocks belonging to the given broadcast. */
  def removeBroadcast(broadcastId: Long, removeFromMaster: Boolean, blocking: Boolean): Unit = {
    val future = driverEndpoint.askSync[Future[Seq[Int]]](
      RemoveBroadcast(broadcastId, removeFromMaster))
    future.failed.foreach(e =>
      logWarning(s"Failed to remove broadcast $broadcastId" +
        s" with removeFromMaster = $removeFromMaster - ${e.getMessage}", e)
    )(ThreadUtils.sameThread)
    if (blocking) {
      // the underlying Futures will timeout anyway, so it's safe to use infinite timeout here
      RpcUtils.INFINITE_TIMEOUT.awaitResult(future)
    }
  }

  /**
   * Return the memory status for each block manager, in the form of a map from
   * the block manager's id to two long values. The first value is the maximum
   * amount of memory allocated for the block manager, while the second is the
   * amount of remaining memory.
   */
  def getMemoryStatus: Map[BlockManagerId, (Long, Long)] = {
    if (driverEndpoint == null) return Map.empty
    driverEndpoint.askSync[Map[BlockManagerId, (Long, Long)]](GetMemoryStatus)
  }

  def getStorageStatus: Array[StorageStatus] = {
    if (driverEndpoint == null) return Array.empty
    driverEndpoint.askSync[Array[StorageStatus]](GetStorageStatus)
  }

  /**
   * Return the block's status on all block managers, if any. NOTE: This is a
   * potentially expensive operation and should only be used for testing.
   *
   * If askStorageEndpoints is true, this invokes the master to query each block manager for the
   * most updated block statuses. This is useful when the master is not informed of the given block
   * by all block managers.
   */
  def getBlockStatus(
      blockId: BlockId,
      askStorageEndpoints: Boolean = true): Map[BlockManagerId, BlockStatus] = {
    val msg = GetBlockStatus(blockId, askStorageEndpoints)
    /*
     * To avoid potential deadlocks, the use of Futures is necessary, because the master endpoint
     * should not block on waiting for a block manager, which can in turn be waiting for the
     * master endpoint for a response to a prior message.
     */
    val response = driverEndpoint.
      askSync[Map[BlockManagerId, Future[Option[BlockStatus]]]](msg)
    val (blockManagerIds, futures) = response.unzip
    implicit val sameThread = ThreadUtils.sameThread
    val cbf = implicitly[
      CanBuildFrom[Iterable[Future[Option[BlockStatus]]],
        Option[BlockStatus],
        Iterable[Option[BlockStatus]]]]
    val blockStatus = timeout.awaitResult(
      Future.sequence(futures)(cbf, ThreadUtils.sameThread))
    if (blockStatus == null) {
      throw new SparkException("BlockManager returned null for BlockStatus query: " + blockId)
    }
    blockManagerIds.zip(blockStatus).flatMap { case (blockManagerId, status) =>
      status.map { s => (blockManagerId, s) }
    }.toMap
  }

  /**
   * Return a list of ids of existing blocks such that the ids match the given filter. NOTE: This
   * is a potentially expensive operation and should only be used for testing.
   *
   * If askStorageEndpoints is true, this invokes the master to query each block manager for the
   * most updated block statuses. This is useful when the master is not informed of the given block
   * by all block managers.
   */
  def getMatchingBlockIds(
      filter: BlockId => Boolean,
      askStorageEndpoints: Boolean): Seq[BlockId] = {
    val msg = GetMatchingBlockIds(filter, askStorageEndpoints)
    val future = driverEndpoint.askSync[Future[Seq[BlockId]]](msg)
    timeout.awaitResult(future)
  }

  /** Stop the driver endpoint, called only on the Spark driver node */
  def stop(): Unit = {
    if (driverEndpoint != null && isDriver) {
      tell(StopBlockManagerMaster)
      driverEndpoint = null
      if (driverHeartbeatEndPoint.askSync[Boolean](StopBlockManagerMaster)) {
        driverHeartbeatEndPoint = null
      } else {
        logWarning("Failed to stop BlockManagerMasterHeartbeatEndpoint")
      }
      logInfo("BlockManagerMaster stopped")
    }
  }

  /** Send a one-way message to the master endpoint, to which we expect it to reply with true. */
  private def tell(message: Any): Unit = {
    if (!driverEndpoint.askSync[Boolean](message)) {
      throw new SparkException("BlockManagerMasterEndpoint returned false, expected true.")
    }
  }

}

private[spark] object BlockManagerMaster {
  val DRIVER_ENDPOINT_NAME = "BlockManagerMaster"
  val DRIVER_HEARTBEAT_ENDPOINT_NAME = "BlockManagerMasterHeartbeat"
}
maropu/spark
core/src/main/scala/org/apache/spark/storage/BlockManagerMaster.scala
Scala
apache-2.0
11,572
/**
 * Copyright 2015, deepsense.io
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.deepsense.deeplang.catalogs.doperable.exceptions

import io.deepsense.deeplang.exceptions.DeepLangException

abstract class DOperableCatalogException(message: String) extends DeepLangException(message)
deepsense-io/seahorse-workflow-executor
deeplang/src/main/scala/io/deepsense/deeplang/catalogs/doperable/exceptions/DOperableCatalogException.scala
Scala
apache-2.0
812