| code (string, lengths 5-1M) | repo_name (string, lengths 5-109) | path (string, lengths 6-208) | language (1 class) | license (15 classes) | size (int64, 5-1M) |
|---|---|---|---|---|---|
package com.scalaAsm.x86
package Instructions
package General
// Description: Set Interrupt Flag
// Category: general/flgctrl
trait STI extends InstructionDefinition {
val mnemonic = "STI"
}
object STI extends ZeroOperands[STI] with STIImpl
trait STIImpl extends STI {
implicit object _0 extends NoOp{
val opcode: OneOpcode = 0xFB
}
}
| bdwashbu/scala-x86-inst | src/main/scala/com/scalaAsm/x86/Instructions/General/STI.scala | Scala | apache-2.0 | 353 |
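For comparison only (not part of the dataset row above), a hedged sketch of how the complementary zero-operand instruction CLI (Clear Interrupt Flag, opcode 0xFA) could be declared with the same InstructionDefinition / ZeroOperands / NoOp pattern, assuming the framework traits behave as in the STI source:

```scala
package com.scalaAsm.x86
package Instructions
package General

// Hypothetical companion to the STI entry above: Clear Interrupt Flag (CLI, opcode 0xFA),
// declared with the same zero-operand pattern as STI.
trait CLI extends InstructionDefinition {
  val mnemonic = "CLI"
}

object CLI extends ZeroOperands[CLI] with CLIImpl

trait CLIImpl extends CLI {
  implicit object _0 extends NoOp {
    val opcode: OneOpcode = 0xFA
  }
}
```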
package mcoffin.rogue.wpi.patcher
import java.io.BufferedOutputStream
import java.io.ByteArrayInputStream
import java.io.File
import java.io.FileOutputStream
import java.io.InputStream
import java.nio.file.{Files, Paths}
import java.net.URL
import java.net.URLClassLoader
import java.util.LinkedList
import java.util.jar._
import org.objectweb.asm.ClassReader
import org.objectweb.asm.ClassWriter
import org.objectweb.asm.tree._
import scala.collection.JavaConversions._
object Patcher extends App {
val ROBOT_BASE_CLASS = "edu.wpi.first.wpilibj.RobotBase"
implicit class RobotBaseClassNode(val classNode: ClassNode) {
lazy val constructorInstructions = {
import org.objectweb.asm.Opcodes._
val insnList = new InsnList
val instructions = Seq(
new VarInsnNode(ALOAD, 0),
new MethodInsnNode(INVOKESPECIAL, "java/lang/Object", "<init>", "()V", false),
new InsnNode(RETURN))
instructions.foreach(insnList.add(_))
insnList
}
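// Replaces every RobotBase constructor body with a bare Object.<init>() call, and exposes
// DriverStation fields as public with a com.google.inject.Inject annotation.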
def patchRobotBase {
import org.objectweb.asm.Opcodes._
implicit class FieldNodeExtension(val fn: FieldNode) {
def ensureVisibleAnnotations {
if (fn.visibleAnnotations == null) {
fn.visibleAnnotations = new LinkedList
}
}
}
val methods = classNode.methods.map(m => m.asInstanceOf[MethodNode])
val initMethods = methods.filter(m => m.name.equals("<init>"))
initMethods.foreach(initMethod => {
initMethod.instructions = constructorInstructions
})
val fields = classNode.fields.map(f => f.asInstanceOf[FieldNode])
val driverStationFields = fields.filter(f => f.desc.equals("Ledu/wpi/first/wpilibj/DriverStation;"))
driverStationFields.foreach(dsField => {
dsField.ensureVisibleAnnotations
val f = dsField.asInstanceOf[FieldNode]
f.access = ACC_PUBLIC
val newAnnotations: LinkedList[AnnotationNode] = new LinkedList
f.visibleAnnotations.map(a => a.asInstanceOf[AnnotationNode]).foreach(newAnnotations.add(_))
if (!newAnnotations.add(new AnnotationNode("Lcom/google/inject/Inject;"))) {
throw new RuntimeException(s"Unable to add annotation to field: ${dsField}!")
} else {
println(s"Added annotation to field: ${dsField}")
}
f.visibleAnnotations = newAnnotations
})
}
}
private[Patcher] def jarFile = new File(args(0))
def pathForClassName(name: String) = {
val p = name.replace('.', '/')
s"${p}.class"
}
val classPath = pathForClassName(ROBOT_BASE_CLASS)
val classLoader = {
val jarURL = {
val f = jarFile
f.toURI.toURL
}
println(s"Loading jar: ${jarURL}")
new URLClassLoader(Array(jarURL))
}
private[Patcher] def classStream = classLoader.getResourceAsStream(classPath)
val classNode = {
println(s"Loading class at path: ${classPath}")
val classReader = new ClassReader(classStream)
val cn = new ClassNode
classReader.accept(cn, 0)
cn
}
classNode.patchRobotBase
val classWriter = {
val cw = new ClassWriter(ClassWriter.COMPUTE_MAXS | ClassWriter.COMPUTE_FRAMES)
classNode.accept(cw)
cw
}
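// Copies every entry of the source jar except the original RobotBase class, then writes
// the patched RobotBase bytes produced by the ClassWriter above as a new entry.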
private[Patcher] def writeOutput {
val bytes = classWriter.toByteArray()
val sourceJar = new JarFile(jarFile)
val jarOutputStream = {
val outputStream = new BufferedOutputStream(new FileOutputStream(args(1)))
val jos = new JarOutputStream(outputStream)
jos
}
def writeJarEntry(e: JarEntry, inputStream: InputStream) {
jarOutputStream.putNextEntry(e)
jarOutputStream.write(Stream.continually(inputStream.read()).takeWhile(_ != -1).map(_.toByte).toArray)
jarOutputStream.closeEntry
}
try {
sourceJar.entries.filter(e => !e.getName.equals(classPath)).foreach(e => writeJarEntry(e, sourceJar.getInputStream(e)))
val robotBaseEntry = new JarEntry(classPath)
writeJarEntry(robotBaseEntry, new ByteArrayInputStream(bytes))
jarOutputStream.flush()
} finally {
jarOutputStream.close()
}
}
writeOutput
}
| mcoffin/rogue | wpilib-patcher/src/main/scala/mcoffin/rogue/wpi/patcher/Patcher.scala | Scala | apache-2.0 | 4,141 |
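A hedged verification sketch (not from the repository above): after running the patcher, the rewritten RobotBase entry in the output jar could be re-read with the same ASM tree API to check that DriverStation fields are public and carry the injected annotation. The object name and the jar-path argument are hypothetical.

```scala
import java.util.jar.JarFile

import org.objectweb.asm.ClassReader
import org.objectweb.asm.Opcodes.ACC_PUBLIC
import org.objectweb.asm.tree.{AnnotationNode, ClassNode, FieldNode}

import scala.collection.JavaConversions._

object PatchVerifier extends App {
  val jar = new JarFile(args(0)) // path to the patched jar (assumed to be the first argument)
  val entry = jar.getJarEntry("edu/wpi/first/wpilibj/RobotBase.class")

  // Parse the class bytes back into an ASM tree model
  val cn = new ClassNode
  new ClassReader(jar.getInputStream(entry)).accept(cn, 0)

  val dsFields = cn.fields
    .map(_.asInstanceOf[FieldNode])
    .filter(_.desc == "Ledu/wpi/first/wpilibj/DriverStation;")

  dsFields.foreach { f =>
    val annotations =
      Option(f.visibleAnnotations).map(_.map(_.asInstanceOf[AnnotationNode]).toList).getOrElse(Nil)
    val hasInject = annotations.exists(_.desc == "Lcom/google/inject/Inject;")
    println(s"${f.name}: public=${(f.access & ACC_PUBLIC) != 0}, @Inject=$hasInject")
  }
}
```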
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.featurespec
import org.scalatest.SharedHelpers.{EventRecordingReporter, thisLineNumber}
import scala.concurrent.{Promise, ExecutionContext, Future}
import org.scalatest.concurrent.SleepHelper
import org.scalatest.events.{InfoProvided, MarkupProvided}
import org.scalatest.exceptions.{DuplicateTestNameException, NotAllowedException}
import org.scalactic.Prettifier
import scala.util.Success
import org.scalatest.featurespec.{ AsyncFeatureSpec, AsyncFeatureSpecLike }
import org.scalatest.funspec
import org.scalatest.ParallelTestExecution
import org.scalatest.Args
import org.scalatest.Assertion
import org.scalatest.Succeeded
import org.scalatest.FailureMessages
import org.scalatest.UnquotedString
class FixtureAsyncFeatureSpecSpec extends funspec.AnyFunSpec {
private val prettifier = Prettifier.default
describe("AsyncFeatureSpec") {
// ParallelTestExecution not working yet.
it("can be used for tests that return Future under parallel async test execution") {
class ExampleSpec extends AsyncFeatureSpec with ParallelTestExecution {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
val a = 1
Scenario("test 1") {
Future {
assert(a == 1)
}
}
Scenario("test 2") {
Future {
assert(a == 2)
}
}
Scenario("test 3") {
Future {
pending
}
}
Scenario("test 4") {
Future {
cancel()
}
}
ignore("test 5") {
Future {
cancel()
}
}
override def newInstance = new ExampleSpec
}
val rep = new EventRecordingReporter
val spec = new ExampleSpec
val status = spec.run(None, Args(reporter = rep))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(rep.testStartingEventsReceived.length == 4)
assert(rep.testSucceededEventsReceived.length == 1)
assert(rep.testSucceededEventsReceived(0).testName == "Scenario: test 1")
assert(rep.testFailedEventsReceived.length == 1)
assert(rep.testFailedEventsReceived(0).testName == "Scenario: test 2")
assert(rep.testPendingEventsReceived.length == 1)
assert(rep.testPendingEventsReceived(0).testName == "Scenario: test 3")
assert(rep.testCanceledEventsReceived.length == 1)
assert(rep.testCanceledEventsReceived(0).testName == "Scenario: test 4")
assert(rep.testIgnoredEventsReceived.length == 1)
assert(rep.testIgnoredEventsReceived(0).testName == "Scenario: test 5")
}
it("can be used for tests that did not return Future under parallel async test execution") {
class ExampleSpec extends AsyncFeatureSpec with ParallelTestExecution {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
val a = 1
Scenario("test 1") {
assert(a == 1)
}
Scenario("test 2") {
assert(a == 2)
}
Scenario("test 3") {
pending
}
Scenario("test 4") {
cancel()
}
ignore("test 5") {
cancel()
}
override def newInstance = new ExampleSpec
}
val rep = new EventRecordingReporter
val spec = new ExampleSpec
val status = spec.run(None, Args(reporter = rep))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(rep.testStartingEventsReceived.length == 4)
assert(rep.testSucceededEventsReceived.length == 1)
assert(rep.testSucceededEventsReceived(0).testName == "Scenario: test 1")
assert(rep.testFailedEventsReceived.length == 1)
assert(rep.testFailedEventsReceived(0).testName == "Scenario: test 2")
assert(rep.testPendingEventsReceived.length == 1)
assert(rep.testPendingEventsReceived(0).testName == "Scenario: test 3")
assert(rep.testCanceledEventsReceived.length == 1)
assert(rep.testCanceledEventsReceived(0).testName == "Scenario: test 4")
assert(rep.testIgnoredEventsReceived.length == 1)
assert(rep.testIgnoredEventsReceived(0).testName == "Scenario: test 5")
}
it("should run tests that return Future in serial by default") {
@volatile var count = 0
class ExampleSpec extends AsyncFeatureSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
Scenario("test 1") {
Future {
SleepHelper.sleep(30)
assert(count == 0)
count = 1
succeed
}
}
Scenario("test 2") {
Future {
assert(count == 1)
SleepHelper.sleep(50)
count = 2
succeed
}
}
Scenario("test 3") {
Future {
assert(count == 2)
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(rep.testStartingEventsReceived.length == 3)
assert(rep.testSucceededEventsReceived.length == 3)
}
it("should run tests that does not return Future in serial by default") {
@volatile var count = 0
class ExampleSpec extends AsyncFeatureSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
Scenario("test 1") {
SleepHelper.sleep(3000)
assert(count == 0)
count = 1
succeed
}
Scenario("test 2") {
assert(count == 1)
SleepHelper.sleep(5000)
count = 2
succeed
}
Scenario("test 3") {
assert(count == 2)
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(rep.testStartingEventsReceived.length == 3)
assert(rep.testSucceededEventsReceived.length == 3)
}
// SKIP-SCALATESTJS,NATIVE-START
it("should run tests and its future in same main thread when using SerialExecutionContext") {
var mainThread = Thread.currentThread
var test1Thread: Option[Thread] = None
var test2Thread: Option[Thread] = None
var onCompleteThread: Option[Thread] = None
class ExampleSpec extends AsyncFeatureSpec {
Scenario("test 1") {
Future {
test1Thread = Some(Thread.currentThread)
succeed
}
}
Scenario("test 2") {
Future {
test2Thread = Some(Thread.currentThread)
succeed
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
status.whenCompleted { s =>
onCompleteThread = Some(Thread.currentThread)
}
status.waitUntilCompleted()
assert(test1Thread.isDefined)
assert(test1Thread.get == mainThread)
assert(test2Thread.isDefined)
assert(test2Thread.get == mainThread)
assert(onCompleteThread.isDefined)
assert(onCompleteThread.get == mainThread)
}
it("should run tests and its true async future in the same thread when use SerialExecutionContext") {
var mainThread = Thread.currentThread
@volatile var test1Thread: Option[Thread] = None
@volatile var test2Thread: Option[Thread] = None
var onCompleteThread: Option[Thread] = None
class ExampleSpec extends AsyncFeatureSpec {
Scenario("test 1") {
val promise = Promise[Assertion]
val timer = new java.util.Timer
timer.schedule(
new java.util.TimerTask {
def run(): Unit = {
promise.complete(Success(succeed))
}
},
1000
)
promise.future.map { s =>
test1Thread = Some(Thread.currentThread)
s
}
}
Scenario("test 2") {
val promise = Promise[Assertion]
val timer = new java.util.Timer
timer.schedule(
new java.util.TimerTask {
def run(): Unit = {
promise.complete(Success(succeed))
}
},
500
)
promise.future.map { s =>
test2Thread = Some(Thread.currentThread)
s
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
status.whenCompleted { s =>
onCompleteThread = Some(Thread.currentThread)
}
status.waitUntilCompleted()
assert(test1Thread.isDefined)
assert(test1Thread.get == mainThread)
assert(test2Thread.isDefined)
assert(test2Thread.get == mainThread)
assert(onCompleteThread.isDefined)
assert(onCompleteThread.get == mainThread)
}
it("should not run out of stack space with nested futures when using SerialExecutionContext") {
class ExampleSpec extends AsyncFeatureSpec {
// Note we get a StackOverflowError with the following execution
// context.
// override implicit def executionContext: ExecutionContext = new ExecutionContext { def execute(runnable: Runnable) = runnable.run; def reportFailure(cause: Throwable) = () }
def sum(xs: List[Int]): Future[Int] =
xs match {
case Nil => Future.successful(0)
case x :: xs => Future(x).flatMap(xx => sum(xs).map(xxx => xx + xxx))
}
Scenario("test 1") {
val fut: Future[Int] = sum((1 to 50000).toList)
fut.map(total => assert(total == 1250025000))
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
status.waitUntilCompleted()
assert(!rep.testSucceededEventsReceived.isEmpty)
}
// SKIP-SCALATESTJS,NATIVE-END
it("should run tests that returns Future and report their result in serial") {
class ExampleSpec extends AsyncFeatureSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
Scenario("test 1") {
Future {
SleepHelper.sleep(60)
succeed
}
}
Scenario("test 2") {
Future {
SleepHelper.sleep(30)
succeed
}
}
Scenario("test 3") {
Future {
succeed
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(rep.testStartingEventsReceived.length == 3)
assert(rep.testStartingEventsReceived(0).testName == "Scenario: test 1")
assert(rep.testStartingEventsReceived(1).testName == "Scenario: test 2")
assert(rep.testStartingEventsReceived(2).testName == "Scenario: test 3")
assert(rep.testSucceededEventsReceived.length == 3)
assert(rep.testSucceededEventsReceived(0).testName == "Scenario: test 1")
assert(rep.testSucceededEventsReceived(1).testName == "Scenario: test 2")
assert(rep.testSucceededEventsReceived(2).testName == "Scenario: test 3")
}
it("should run tests that does not return Future and report their result in serial") {
class ExampleSpec extends AsyncFeatureSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
Scenario("test 1") {
SleepHelper.sleep(60)
succeed
}
Scenario("test 2") {
SleepHelper.sleep(30)
succeed
}
Scenario("test 3") {
succeed
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(rep.testStartingEventsReceived.length == 3)
assert(rep.testStartingEventsReceived(0).testName == "Scenario: test 1")
assert(rep.testStartingEventsReceived(1).testName == "Scenario: test 2")
assert(rep.testStartingEventsReceived(2).testName == "Scenario: test 3")
assert(rep.testSucceededEventsReceived.length == 3)
assert(rep.testSucceededEventsReceived(0).testName == "Scenario: test 1")
assert(rep.testSucceededEventsReceived(1).testName == "Scenario: test 2")
assert(rep.testSucceededEventsReceived(2).testName == "Scenario: test 3")
}
it("should send an InfoProvided event for an info in main spec body") {
class MySuite extends AsyncFeatureSpec {
info(
"hi there"
)
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val infoList = reporter.infoProvidedEventsReceived
assert(infoList.size == 1)
assert(infoList(0).message == "hi there")
}
it("should send an InfoProvided event for an info in feature body") {
class MySuite extends AsyncFeatureSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
Feature("test feature") {
info(
"hi there"
)
Scenario("test 1") { succeed }
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val infoList = reporter.infoProvidedEventsReceived
assert(infoList.size == 1)
assert(infoList(0).message == "hi there")
}
it("should send an InfoProvided event for an info in scenario body") {
class MySuite extends AsyncFeatureSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
Feature("test feature") {
Scenario("test 1") {
info("hi there")
succeed
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val infoList = reporter.infoProvidedEventsReceived
assert(infoList.size == 0)
val testSucceededList = reporter.testSucceededEventsReceived
assert(testSucceededList.size == 1)
assert(testSucceededList(0).recordedEvents.size == 1)
val recordedEvent = testSucceededList(0).recordedEvents(0)
assert(recordedEvent.isInstanceOf[InfoProvided])
val infoProvided = recordedEvent.asInstanceOf[InfoProvided]
assert(infoProvided.message == "hi there")
}
it("should send an InfoProvided event for an info in Future returned by scenario body") {
class MySuite extends AsyncFeatureSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
Feature("test feature") {
Scenario("test 1") {
Future {
info("hi there")
succeed
}
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val infoList = reporter.infoProvidedEventsReceived
assert(infoList.size == 0)
val testSucceededList = reporter.testSucceededEventsReceived
assert(testSucceededList.size == 1)
assert(testSucceededList(0).recordedEvents.size == 1)
val recordedEvent = testSucceededList(0).recordedEvents(0)
assert(recordedEvent.isInstanceOf[InfoProvided])
val infoProvided = recordedEvent.asInstanceOf[InfoProvided]
assert(infoProvided.message == "hi there")
}
it("should send a NoteProvided event for a note in main spec body") {
class MySuite extends AsyncFeatureSpec {
note(
"hi there"
)
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val noteList = reporter.noteProvidedEventsReceived
assert(noteList.size == 1)
assert(noteList(0).message == "hi there")
}
it("should send a NoteProvided event for a note in feature body") {
class MySuite extends AsyncFeatureSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
Feature("test feature") {
note(
"hi there"
)
Scenario("test 1") { succeed }
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val noteList = reporter.noteProvidedEventsReceived
assert(noteList.size == 1)
assert(noteList(0).message == "hi there")
}
it("should send a NoteProvided event for a note in scenario body") {
class MySuite extends AsyncFeatureSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
Feature("test feature") {
Scenario("test 1") {
note("hi there")
succeed
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val noteList = reporter.noteProvidedEventsReceived
assert(noteList.size == 1)
assert(noteList(0).message == "hi there")
}
it("should send a NoteProvided event for a note in Future returned by scenario body") {
class MySuite extends AsyncFeatureSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
Feature("test feature") {
Scenario("test 1") {
Future {
note("hi there")
succeed
}
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val noteList = reporter.noteProvidedEventsReceived
assert(noteList.size == 1)
assert(noteList(0).message == "hi there")
}
it("should send an AlertProvided event for an alert in main spec body") {
class MySuite extends AsyncFeatureSpec {
alert(
"hi there"
)
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val alertList = reporter.alertProvidedEventsReceived
assert(alertList.size == 1)
assert(alertList(0).message == "hi there")
}
it("should send an AlertProvided event for an alert in feature body") {
class MySuite extends AsyncFeatureSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
Feature("test feature") {
alert(
"hi there"
)
Scenario("test 1") { succeed }
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val alertList = reporter.alertProvidedEventsReceived
assert(alertList.size == 1)
assert(alertList(0).message == "hi there")
}
it("should send an AlertProvided event for an alert in scenario body") {
class MySuite extends AsyncFeatureSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
Feature("test feature") {
Scenario("test 1") {
alert("hi there")
succeed
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val alertList = reporter.alertProvidedEventsReceived
assert(alertList.size == 1)
assert(alertList(0).message == "hi there")
}
it("should send an AlertProvided event for an alert in Future returned by scenario body") {
class MySuite extends AsyncFeatureSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
Feature("test feature") {
Scenario("test 1") {
Future {
alert("hi there")
succeed
}
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val alertList = reporter.alertProvidedEventsReceived
assert(alertList.size == 1)
assert(alertList(0).message == "hi there")
}
it("should send a MarkupProvided event for a markup in main spec body") {
class MySuite extends AsyncFeatureSpec {
markup(
"hi there"
)
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val markupList = reporter.markupProvidedEventsReceived
assert(markupList.size == 1)
assert(markupList(0).text == "hi there")
}
it("should send a MarkupProvided event for a markup in feature body") {
class MySuite extends AsyncFeatureSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
Feature("test feature") {
markup(
"hi there"
)
Scenario("test 1") { succeed }
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val markupList = reporter.markupProvidedEventsReceived
assert(markupList.size == 1)
assert(markupList(0).text == "hi there")
}
it("should send a MarkupProvided event for a markup in scenario body") {
class MySuite extends AsyncFeatureSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
Feature("test feature") {
Scenario("test 1") {
markup("hi there")
succeed
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val markupList = reporter.markupProvidedEventsReceived
assert(markupList.size == 0)
val testSucceededList = reporter.testSucceededEventsReceived
assert(testSucceededList.size == 1)
assert(testSucceededList(0).recordedEvents.size == 1)
val recordedEvent = testSucceededList(0).recordedEvents(0)
assert(recordedEvent.isInstanceOf[MarkupProvided])
val markupProvided = recordedEvent.asInstanceOf[MarkupProvided]
assert(markupProvided.text == "hi there")
}
it("should send a MarkupProvided event for a markup in Future returned by scenario body") {
class MySuite extends AsyncFeatureSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
Feature("test feature") {
Scenario("test 1") {
Future {
markup("hi there")
succeed
}
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val markupList = reporter.markupProvidedEventsReceived
assert(markupList.size == 0)
val testSucceededList = reporter.testSucceededEventsReceived
assert(testSucceededList.size == 1)
assert(testSucceededList(0).recordedEvents.size == 1)
val recordedEvent = testSucceededList(0).recordedEvents(0)
assert(recordedEvent.isInstanceOf[MarkupProvided])
val markupProvided = recordedEvent.asInstanceOf[MarkupProvided]
assert(markupProvided.text == "hi there")
}
it("should generate NotAllowedException wrapping a DuplicateTestNameException is thrown inside scope") {
class TestSpec extends AsyncFeatureSpec {
Feature("a feature") {
Scenario("test 1") { succeed }
Scenario("test 1") { succeed }
}
}
val e = intercept[NotAllowedException] {
new TestSpec
}
assert("AsyncFeatureSpecSpec.scala" == e.failedCodeFileName.get)
assert(e.failedCodeLineNumber.get == thisLineNumber - 7)
assert(e.cause.isDefined)
val causeThrowable = e.cause.get
assert(e.message == Some(FailureMessages.exceptionWasThrownInFeatureClause(prettifier, UnquotedString(causeThrowable.getClass.getName), "a feature", FailureMessages.duplicateTestName(prettifier, UnquotedString("Feature: a feature Scenario: test 1")))))
assert(causeThrowable.isInstanceOf[DuplicateTestNameException])
val cause = causeThrowable.asInstanceOf[DuplicateTestNameException]
assert(cause.getMessage == FailureMessages.duplicateTestName(prettifier, UnquotedString("Feature: a feature Scenario: test 1")))
}
it("should allow other execution context to be used") {
class TestSpec extends AsyncFeatureSpecLike {
// SKIP-SCALATESTJS,NATIVE-START
override implicit val executionContext = scala.concurrent.ExecutionContext.Implicits.global
// SKIP-SCALATESTJS,NATIVE-END
// SCALATESTJS-ONLY override implicit val executionContext = scala.scalajs.concurrent.JSExecutionContext.runNow
val a = 1
Feature("feature 1") {
Scenario("scenario A") {
Future { assert(a == 1) }
}
}
Feature("feature 2") {
Scenario("scenario B") {
Future { assert(a == 1) }
}
}
Feature("group3") {
Scenario("test C") {
Future { assert(a == 1) }
}
}
}
val suite = new TestSpec
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(reporter.scopeOpenedEventsReceived.length == 3)
assert(reporter.scopeClosedEventsReceived.length == 3)
assert(reporter.testStartingEventsReceived.length == 3)
assert(reporter.testSucceededEventsReceived.length == 3)
}
}
}
| scalatest/scalatest | jvm/featurespec-test/src/test/scala/org/scalatest/featurespec/AsyncFeatureSpecSpec.scala | Scala | apache-2.0 | 29,688 |
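For orientation, a minimal hedged sketch of the AsyncFeatureSpec style exercised by the suite above: a Scenario body may return a Future[Assertion] or a plain Assertion, which the async style lifts into a Future. The spec class and scenario names below are made up.

```scala
import scala.concurrent.Future

import org.scalatest.featurespec.AsyncFeatureSpec

class AdditionSpec extends AsyncFeatureSpec {
  Feature("addition") {
    Scenario("adding two numbers asynchronously") {
      Future { assert(1 + 1 == 2) } // the scenario yields a Future[Assertion]
    }
    Scenario("a synchronous assertion is also accepted") {
      assert(2 + 2 == 4) // lifted into a Future by the async style trait
    }
  }
}
```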
package controllers
import play.api.libs.json._
import play.api.mvc._
import models.Person._
import javax.inject.Inject
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}
import dao.PersonDAO
import scala.concurrent.Future
class PersonController @Inject()(personDao: PersonDAO) extends Controller {
def listPersons = Action.async { implicit request =>
val persons: Future[Seq[Person]] = personDao.all()
persons map {
p => Ok(Json.toJson(p))
}
}
def getPerson(personId: Int) = Action.async { implicit request =>
val person: Future[Option[Person]] = personDao.getPerson(personId)
person map {
case Some(p) => Ok(Json.toJson(p))
case None => NotFound
}
}
def updatePerson(personId: Int) = Action.async(parse.json[Person]) { implicit request =>
val person: Person = request.body
val affectedRowsCount: Future[Int] = personDao.updatePerson(personId, person)
affectedRowsCount map {
case 1 => Ok
case 0 => NotFound
case _ => InternalServerError
}
}
def createPerson = Action.async(parse.json[Person]) { implicit request =>
val person: Person = request.body
val personId: Future[Int] = personDao.createPerson(person)
personId map {
case id => Created(Json.toJson(id))
}
}
def deletePerson(personId: Int) = Action.async { implicit request =>
val affectedRowsCount: Future[Int] = personDao.deletePerson(personId)
affectedRowsCount map {
case 1 => Ok
case 0 => NotFound
case _ => InternalServerError
}
}
def getCoaches(teamId: Int) = Action.async { implicit request =>
val coaches: Future[Seq[Person]] = personDao.getCoaches(teamId)
coaches map {
p => {
Ok(Json.toJson(p.map("/person/" + _.personid)))
}
}
}
}
| magura42/KickAppServer | app/controllers/PersonController.scala | Scala | mit | 1,841 |
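The controller above only shows the calls it makes; the DAO contract it relies on can be reconstructed as the hedged sketch below. The real dao.PersonDAO (presumably Slick-backed) may differ in naming and implementation details.

```scala
import scala.concurrent.Future

import models.Person // the Play app's Person model, as imported by the controller

// Hedged reconstruction of the interface PersonController depends on.
trait PersonDAOContract {
  def all(): Future[Seq[Person]]
  def getPerson(personId: Int): Future[Option[Person]]
  def updatePerson(personId: Int, person: Person): Future[Int] // affected row count
  def createPerson(person: Person): Future[Int]                // id of the created person
  def deletePerson(personId: Int): Future[Int]                 // affected row count
  def getCoaches(teamId: Int): Future[Seq[Person]]
}
```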
/**
* Copyright (C) 2011 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.processor
import org.orbeon.oxf.util._
import ScalaUtils._
import java.io._
import java.util.regex.Matcher
import org.orbeon.oxf.common.Version
import org.orbeon.oxf.controller.PageFlowControllerProcessor
import org.orbeon.oxf.externalcontext.URLRewriter
import org.orbeon.oxf.pipeline.api.ExternalContext
import org.orbeon.oxf.pipeline.api.PipelineContext
import org.orbeon.oxf.resources.ResourceManagerWrapper
import org.orbeon.oxf.util._
import org.orbeon.oxf.xforms.processor.XFormsFeatures.ResourceConfig
import scala.util.Try
import scala.util.control.NonFatal
object XFormsResourceRewriter extends Logging {
/**
* Generate the resources into the given OutputStream. The stream is flushed and closed when done.
*
* @param logger logger
* @param resources list of ResourceConfig to consider
* @param os OutputStream to write to
* @param isCSS whether to generate CSS or JavaScript resources
* @param isMinimal whether to use minimal resources
*/
def generateAndClose(
resources : Seq[ResourceConfig],
namespaceOpt : Option[String],
os : OutputStream,
isCSS : Boolean,
isMinimal : Boolean)(
implicit logger: IndentedLogger
): Unit =
useAndClose(os) { _ ⇒
if (isCSS)
generateCSS(resources, namespaceOpt, os, isMinimal)
else
generateJS(resources, os, isMinimal)
os.flush()
}
private def logFailure[T](path: String)(implicit logger: IndentedLogger): PartialFunction[Throwable, Any] = {
case NonFatal(t) ⇒
error("could not read resource to aggregate", Seq("resource" → path))
}
private def generateCSS(
resources : Seq[ResourceConfig],
namespaceOpt : Option[String],
os : OutputStream,
isMinimal : Boolean)(
implicit logger: IndentedLogger
): Unit = {
val response = NetUtils.getExternalContext.getResponse
val pipelineContext = PipelineContext.get
// Create matcher that matches all paths in case resources are versioned
if (pipelineContext.getAttribute(PageFlowControllerProcessor.PathMatchers) eq null) {
val matchAllPathMatcher = URLRewriterUtils.getMatchAllPathMatcher
pipelineContext.setAttribute(PageFlowControllerProcessor.PathMatchers, matchAllPathMatcher)
}
val rm = ResourceManagerWrapper.instance
// NOTE: The idea is that:
// - we recover and log resource read errors (a file can be missing, for example, during development)
// - we don't recover when writing (writing the resources will be interrupted)
def tryInputStream(path: String) =
Try(rm.getContentAsStream(path)) onFailure logFailure(path)
// Use iterators so that we don't open all input streams at once
def inputStreamIterator =
for {
resource ← resources.iterator
path = resource.getResourcePath(isMinimal)
is ← tryInputStream(path).iterator
} yield
path → is
def tryReadCSS(path: String, is: InputStream) =
Try {
val sbw = new StringBuilderWriter
copyReader(new InputStreamReader(is, "utf-8"), sbw)
sbw.toString
} onFailure
logFailure(path)
val readCSSIterator =
for {
(path, is) ← inputStreamIterator
originalCSS ← tryReadCSS(path, is).iterator
} yield
path → originalCSS
val outputWriter = new OutputStreamWriter(os, "utf-8")
// Output Orbeon Forms version if allowed
Version.versionStringIfAllowed foreach { version ⇒
outputWriter.write(s"/* This file was produced by $version */\n")
}
// Write and rewrite all resources one after the other
readCSSIterator foreach {
case (path, originalCSS) ⇒
if (! isMinimal)
outputWriter.write("/* Original CSS path: " + path + " */\n")
outputWriter.write(rewriteCSS(originalCSS, path, namespaceOpt, response))
}
outputWriter.flush()
}
private val MatchSelectorAndBlock = """([^\{]*\s*)(\{[^\}]*\})""".r
private val MatchId = """#([\w]+)""".r
private val MatchURL = """url\(("|')?([^"^'^\)]*)("|')?\)""".r
// Public for unit tests
def rewriteCSS(
css : String,
resourcePath : String,
namespaceOpt : Option[String],
response : ExternalContext.Response)(
implicit logger: IndentedLogger
): String = {
// Match and rewrite an id within a selector
def rewriteSelector(s: String) = namespaceOpt match {
case Some(namespace) ⇒ MatchId.replaceAllIn(s, e ⇒ Matcher.quoteReplacement("#" + namespace + e.group(1)))
case None ⇒ s
}
// Rewrite an individual URL
def tryRewriteURL(url: String) =
Try {
val resolvedURI = NetUtils.resolveURI(url, resourcePath)
val rewrittenURI = response.rewriteResourceURL(resolvedURI, URLRewriter.REWRITE_MODE_ABSOLUTE_PATH_OR_RELATIVE)
"url(" + rewrittenURI + ")"
} recover {
case NonFatal(t) ⇒
warn("found invalid URI in CSS file", Seq("uri" → url))
"url(" + url + ")"
}
// Match and rewrite a URL within a block
def rewriteBlock(s: String) =
MatchURL.replaceAllIn(s, e ⇒ Matcher.quoteReplacement(tryRewriteURL(e.group(2)).get))
// Find approximately pairs of selectors/blocks and rewrite each part
// Ids are rewritten only if the namespace is not empty
MatchSelectorAndBlock.replaceAllIn(css, e ⇒ Matcher.quoteReplacement(rewriteSelector(e.group(1)) + rewriteBlock(e.group(2))))
}
private def generateJS(
resources : Seq[ResourceConfig],
os : OutputStream,
isMinimal : Boolean)(
implicit logger: IndentedLogger
): Unit = {
// Output Orbeon Forms version if allowed
Version.versionStringIfAllowed foreach { version ⇒
val outputWriter = new OutputStreamWriter(os, "utf-8")
outputWriter.write(s"// This file was produced by $version\n")
outputWriter.flush()
}
val rm = ResourceManagerWrapper.instance
def tryInputStream(path: String) =
Try(rm.getContentAsStream(path)) onFailure logFailure(path)
// Use iterators so that we don't open all input streams at once
def inputStreamIterator =
resources.iterator flatMap (r ⇒ tryInputStream(r.getResourcePath(isMinimal)).iterator)
// Write all resources one after the other
inputStreamIterator foreach { is ⇒
useAndClose(is)(NetUtils.copyStream(_, os))
os.write('\n')
}
}
// Compute the last modification date of the given resources.
def computeCombinedLastModified(resources: Seq[ResourceConfig], isMinimal: Boolean): Long = {
val rm = ResourceManagerWrapper.instance
// NOTE: Actual aggregation will log missing files so we ignore them here
def lastModified(r: ResourceConfig) =
Try(rm.lastModified(r.getResourcePath(isMinimal), false)) getOrElse 0L
if (resources.isEmpty) 0L else resources map lastModified max
}
def cacheResources(
resources : Seq[ResourceConfig],
resourcePath : String,
namespaceOpt : Option[String],
combinedLastModified: Long,
isCSS : Boolean,
isMinimal : Boolean
): File = {
implicit val indentedLogger = XFormsResourceServer.indentedLogger
val rm = ResourceManagerWrapper.instance
Option(rm.getRealPath(resourcePath)) match {
case Some(realPath) ⇒
// We hope to be able to cache as a resource
def logParameters = Seq("resource path" → resourcePath, "real path" → realPath)
val resourceFile = new File(realPath)
if (resourceFile.exists) {
// Resources exist, generate if needed
val resourceLastModified = resourceFile.lastModified
if (resourceLastModified < combinedLastModified) {
// Resource is out of date, generate
debug("cached combined resources out of date, saving", logParameters)
val fos = new FileOutputStream(resourceFile)
generateAndClose(resources, namespaceOpt, fos, isCSS, isMinimal)(indentedLogger)
} else
debug("cached combined resources exist and are up-to-date", logParameters)
} else {
// Resource doesn't exist, generate
debug("cached combined resources don't exist, saving", logParameters)
resourceFile.getParentFile.mkdirs()
resourceFile.createNewFile()
val fos = new FileOutputStream(resourceFile)
generateAndClose(resources, namespaceOpt, fos, isCSS, isMinimal)(indentedLogger)
}
resourceFile
case None ⇒
debug("unable to locate real path for cached combined resources, not saving", Seq("resource path" → resourcePath))
null
}
}
}
|
ajw625/orbeon-forms
|
src/main/scala/org/orbeon/oxf/xforms/processor/XFormsResourceRewriter.scala
|
Scala
|
lgpl-2.1
| 10,572 |
package debop4s.data.slick
/**
* Performs conversions between user-defined custom types and DB column values.
* @author [email protected]
*/
package object customtypes {
// TODO: provide implicit conversion functions, following examples.JdbcTypeFunSuite.
}
| debop/debop4s | debop4s-data-slick/src/main/scala/debop4s/data/slick/customtypes/package.scala | Scala | apache-2.0 | 267 |
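The TODO above is about providing implicit conversions between user-defined types and DB column values. A hedged sketch of that idea using Slick 3's MappedColumnType follows; debop4s targets an older Slick API, so the profile import and type names here are illustrative only.

```scala
import slick.jdbc.H2Profile.api._

// A hypothetical user-defined type stored as a single-character column.
sealed trait Answer
case object Yes extends Answer
case object No  extends Answer

object AnswerColumnType {
  // Maps Answer <-> String so the custom type can be used directly in Slick table definitions.
  implicit val answerColumnType: BaseColumnType[Answer] =
    MappedColumnType.base[Answer, String](
      {
        case Yes => "Y"
        case No  => "N"
      },
      {
        case "Y" => Yes
        case _   => No
      }
    )
}
```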
package roshan.map
import akka.actor.{Actor, ActorRef}
import roshan.protocols.MapProtocol.CharacterId
import roshan.buffer.Msg.{CharacterAction, ACTION}
import roshan.model.Direction._
import roshan.protocols.CharacterChangesProtocol._
/** EventBox is an event pub/sub system for map boxes. */
trait EventBox extends Actor {
var subscribers = Set[ActorRef]()
def publish(event: CharacterChangeBroadcast) {
subscribers foreach (_ ! event)
}
def subscribe(subscriber: ActorRef) {
subscribers += subscriber
}
def unsubscribe(subscriber: ActorRef) {
subscribers -= subscriber
}
def publishCharacterChange(
id: CharacterId,
x: Int = -1,
y: Int = -1,
action: ACTION = null,
direction: Direction = null,
walk: Boolean = false,
isGone: Boolean = false,
say: String = null
) {
val msg = CharacterAction.newBuilder()
msg.setId(id.underlying)
if (isGone)
msg.setGone(true)
if (action != null)
msg.setAction(action)
if (x != -1 && y != -1)
msg.setX(x).setY(y)
if (walk)
msg.setWalk(true)
if (direction != null)
msg.setDirection(direction.id)
if (say != null)
msg.setSay(say)
publish(CharacterChangeBroadcast(msg.build()))
}
def SubUnsub: Receive = {
case Subscribe() =>
subscribe(sender)
case Unsubscribe =>
unsubscribe(sender)
}
}
| andychase/roshan | src/main/scala/roshan/map/EventBox.scala | Scala | mit | 1,624 |
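A hedged usage sketch (not part of the repository above): a map-box actor can mix in EventBox and compose the SubUnsub handler with its own behaviour. The actor and its fallback handling below are hypothetical.

```scala
package roshan.map

import akka.actor.Actor

class ExampleMapBox extends Actor with EventBox {
  // Box-specific messages would be handled here; anything else is passed to unhandled.
  private val boxBehaviour: Receive = {
    case other => unhandled(other)
  }

  // Subscribe/Unsubscribe are delegated to the SubUnsub handler provided by EventBox.
  def receive: Receive = SubUnsub orElse boxBehaviour
}
```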
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs10x.boxes
import org.mockito.Mockito._
import uk.gov.hmrc.ct.accounts.AccountStatementValidationFixture
import uk.gov.hmrc.ct.accounts.frs102.helper.DirectorsReportEnabled
import uk.gov.hmrc.ct.accounts.frs10x.retriever.Frs10xDirectorsBoxRetriever
import uk.gov.hmrc.ct.box.retriever.FilingAttributesBoxValueRetriever
class AC8899Spec extends AccountStatementValidationFixture[Frs10xDirectorsBoxRetriever with FilingAttributesBoxValueRetriever] {
trait MockRetriever extends Frs10xDirectorsBoxRetriever with FilingAttributesBoxValueRetriever
override val boxRetriever: MockRetriever = mock[MockRetriever] (RETURNS_SMART_NULLS)
override def setupMocks() = {
when(boxRetriever.directorsReportEnabled()).thenReturn(DirectorsReportEnabled(true))
}
doStatementValidationTests("AC8899", AC8899.apply)
"validation passes if not enabled" in {
when(boxRetriever.directorsReportEnabled()).thenReturn(DirectorsReportEnabled(false))
AC8899(None).validate(boxRetriever) shouldBe Set.empty
}
}
| pncampbell/ct-calculations | src/test/scala/uk/gov/hmrc/ct/accounts/frs10x/boxes/AC8899Spec.scala | Scala | apache-2.0 | 1,652 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.io.File
import java.net.URI
import scala.collection.mutable
import scala.language.reflectiveCalls
import org.apache.hadoop.fs.{BlockLocation, FileStatus, LocatedFileStatus, Path, RawLocalFileSystem}
import org.apache.spark.metrics.source.HiveCatalogMetrics
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.util.{KnownSizeEstimation, SizeEstimator}
class FileIndexSuite extends SharedSQLContext {
test("InMemoryFileIndex: leaf files are qualified paths") {
withTempDir { dir =>
val file = new File(dir, "text.txt")
stringToFile(file, "text")
val path = new Path(file.getCanonicalPath)
val catalog = new InMemoryFileIndex(spark, Seq(path), Map.empty, None) {
def leafFilePaths: Seq[Path] = leafFiles.keys.toSeq
def leafDirPaths: Seq[Path] = leafDirToChildrenFiles.keys.toSeq
}
assert(catalog.leafFilePaths.forall(p => p.toString.startsWith("file:/")))
assert(catalog.leafDirPaths.forall(p => p.toString.startsWith("file:/")))
}
}
test("InMemoryFileIndex: input paths are converted to qualified paths") {
withTempDir { dir =>
val file = new File(dir, "text.txt")
stringToFile(file, "text")
val unqualifiedDirPath = new Path(dir.getCanonicalPath)
val unqualifiedFilePath = new Path(file.getCanonicalPath)
require(!unqualifiedDirPath.toString.contains("file:"))
require(!unqualifiedFilePath.toString.contains("file:"))
val fs = unqualifiedDirPath.getFileSystem(spark.sessionState.newHadoopConf())
val qualifiedFilePath = fs.makeQualified(new Path(file.getCanonicalPath))
require(qualifiedFilePath.toString.startsWith("file:"))
val catalog1 = new InMemoryFileIndex(
spark, Seq(unqualifiedDirPath), Map.empty, None)
assert(catalog1.allFiles.map(_.getPath) === Seq(qualifiedFilePath))
val catalog2 = new InMemoryFileIndex(
spark, Seq(unqualifiedFilePath), Map.empty, None)
assert(catalog2.allFiles.map(_.getPath) === Seq(qualifiedFilePath))
}
}
test("InMemoryFileIndex: folders that don't exist don't throw exceptions") {
withTempDir { dir =>
val deletedFolder = new File(dir, "deleted")
assert(!deletedFolder.exists())
val catalog1 = new InMemoryFileIndex(
spark, Seq(new Path(deletedFolder.getCanonicalPath)), Map.empty, None)
// doesn't throw an exception
assert(catalog1.listLeafFiles(catalog1.rootPaths).isEmpty)
}
}
test("PartitioningAwareFileIndex listing parallelized with many top level dirs") {
for ((scale, expectedNumPar) <- Seq((10, 0), (50, 1))) {
withTempDir { dir =>
val topLevelDirs = (1 to scale).map { i =>
val tmp = new File(dir, s"foo=$i.txt")
tmp.mkdir()
new Path(tmp.getCanonicalPath)
}
HiveCatalogMetrics.reset()
assert(HiveCatalogMetrics.METRIC_PARALLEL_LISTING_JOB_COUNT.getCount() == 0)
new InMemoryFileIndex(spark, topLevelDirs, Map.empty, None)
assert(HiveCatalogMetrics.METRIC_PARALLEL_LISTING_JOB_COUNT.getCount() == expectedNumPar)
}
}
}
test("PartitioningAwareFileIndex listing parallelized with large child dirs") {
for ((scale, expectedNumPar) <- Seq((10, 0), (50, 1))) {
withTempDir { dir =>
for (i <- 1 to scale) {
new File(dir, s"foo=$i.txt").mkdir()
}
HiveCatalogMetrics.reset()
assert(HiveCatalogMetrics.METRIC_PARALLEL_LISTING_JOB_COUNT.getCount() == 0)
new InMemoryFileIndex(spark, Seq(new Path(dir.getCanonicalPath)), Map.empty, None)
assert(HiveCatalogMetrics.METRIC_PARALLEL_LISTING_JOB_COUNT.getCount() == expectedNumPar)
}
}
}
test("PartitioningAwareFileIndex listing parallelized with large, deeply nested child dirs") {
for ((scale, expectedNumPar) <- Seq((10, 0), (50, 4))) {
withTempDir { dir =>
for (i <- 1 to 2) {
val subdirA = new File(dir, s"a=$i")
subdirA.mkdir()
for (j <- 1 to 2) {
val subdirB = new File(subdirA, s"b=$j")
subdirB.mkdir()
for (k <- 1 to scale) {
new File(subdirB, s"foo=$k.txt").mkdir()
}
}
}
HiveCatalogMetrics.reset()
assert(HiveCatalogMetrics.METRIC_PARALLEL_LISTING_JOB_COUNT.getCount() == 0)
new InMemoryFileIndex(spark, Seq(new Path(dir.getCanonicalPath)), Map.empty, None)
assert(HiveCatalogMetrics.METRIC_PARALLEL_LISTING_JOB_COUNT.getCount() == expectedNumPar)
}
}
}
test("InMemoryFileIndex - file filtering") {
assert(!InMemoryFileIndex.shouldFilterOut("abcd"))
assert(InMemoryFileIndex.shouldFilterOut(".ab"))
assert(InMemoryFileIndex.shouldFilterOut("_cd"))
assert(!InMemoryFileIndex.shouldFilterOut("_metadata"))
assert(!InMemoryFileIndex.shouldFilterOut("_common_metadata"))
assert(InMemoryFileIndex.shouldFilterOut("_ab_metadata"))
assert(InMemoryFileIndex.shouldFilterOut("_cd_common_metadata"))
assert(InMemoryFileIndex.shouldFilterOut("a._COPYING_"))
}
test("SPARK-17613 - PartitioningAwareFileIndex: base path w/o '/' at end") {
class MockCatalog(
override val rootPaths: Seq[Path])
extends PartitioningAwareFileIndex(spark, Map.empty, None) {
override def refresh(): Unit = {}
override def leafFiles: mutable.LinkedHashMap[Path, FileStatus] = mutable.LinkedHashMap(
new Path("mockFs://some-bucket/file1.json") -> new FileStatus()
)
override def leafDirToChildrenFiles: Map[Path, Array[FileStatus]] = Map(
new Path("mockFs://some-bucket/") -> Array(new FileStatus())
)
override def partitionSpec(): PartitionSpec = {
PartitionSpec.emptySpec
}
}
withSQLConf(
"fs.mockFs.impl" -> classOf[FakeParentPathFileSystem].getName,
"fs.mockFs.impl.disable.cache" -> "true") {
val pathWithSlash = new Path("mockFs://some-bucket/")
assert(pathWithSlash.getParent === null)
val pathWithoutSlash = new Path("mockFs://some-bucket")
assert(pathWithoutSlash.getParent === null)
val catalog1 = new MockCatalog(Seq(pathWithSlash))
val catalog2 = new MockCatalog(Seq(pathWithoutSlash))
assert(catalog1.allFiles().nonEmpty)
assert(catalog2.allFiles().nonEmpty)
}
}
test("InMemoryFileIndex with empty rootPaths when PARALLEL_PARTITION_DISCOVERY_THRESHOLD" +
"is a nonpositive number") {
withSQLConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD.key -> "0") {
new InMemoryFileIndex(spark, Seq.empty, Map.empty, None)
}
val e = intercept[IllegalArgumentException] {
withSQLConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD.key -> "-1") {
new InMemoryFileIndex(spark, Seq.empty, Map.empty, None)
}
}.getMessage
assert(e.contains("The maximum number of paths allowed for listing files at " +
"driver side must not be negative"))
}
test("refresh for InMemoryFileIndex with FileStatusCache") {
withTempDir { dir =>
val fileStatusCache = FileStatusCache.getOrCreate(spark)
val dirPath = new Path(dir.getAbsolutePath)
val fs = dirPath.getFileSystem(spark.sessionState.newHadoopConf())
val catalog =
new InMemoryFileIndex(spark, Seq(dirPath), Map.empty, None, fileStatusCache) {
def leafFilePaths: Seq[Path] = leafFiles.keys.toSeq
def leafDirPaths: Seq[Path] = leafDirToChildrenFiles.keys.toSeq
}
val file = new File(dir, "text.txt")
stringToFile(file, "text")
assert(catalog.leafDirPaths.isEmpty)
assert(catalog.leafFilePaths.isEmpty)
catalog.refresh()
assert(catalog.leafFilePaths.size == 1)
assert(catalog.leafFilePaths.head == fs.makeQualified(new Path(file.getAbsolutePath)))
assert(catalog.leafDirPaths.size == 1)
assert(catalog.leafDirPaths.head == fs.makeQualified(dirPath))
}
}
test("SPARK-20280 - FileStatusCache with a partition with very many files") {
/* fake the size, otherwise we need to allocate 2GB of data to trigger this bug */
class MyFileStatus extends FileStatus with KnownSizeEstimation {
override def estimatedSize: Long = 1000 * 1000 * 1000
}
/* files * MyFileStatus.estimatedSize should overflow to negative integer
* so, make it between 2bn and 4bn
*/
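// Concretely: 3 files * 1,000,000,000 bytes = 3,000,000,000, which exceeds Int.MaxValue
// (2,147,483,647) and would wrap to a negative value in 32-bit arithmetic.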
val files = (1 to 3).map { i =>
new MyFileStatus()
}
val fileStatusCache = FileStatusCache.getOrCreate(spark)
fileStatusCache.putLeafFiles(new Path("/tmp", "abc"), files.toArray)
}
test("SPARK-20367 - properly unescape column names in inferPartitioning") {
withTempPath { path =>
val colToUnescape = "Column/#%'?"
spark
.range(1)
.select(col("id").as(colToUnescape), col("id"))
.write.partitionBy(colToUnescape).parquet(path.getAbsolutePath)
assert(spark.read.parquet(path.getAbsolutePath).schema.exists(_.name == colToUnescape))
}
}
test("SPARK-25062 - InMemoryFileIndex stores BlockLocation objects no matter what subclass " +
"the FS returns") {
withSQLConf("fs.file.impl" -> classOf[SpecialBlockLocationFileSystem].getName) {
withTempDir { dir =>
val file = new File(dir, "text.txt")
stringToFile(file, "text")
val inMemoryFileIndex = new InMemoryFileIndex(
spark, Seq(new Path(file.getCanonicalPath)), Map.empty, None) {
def leafFileStatuses = leafFiles.values
}
val blockLocations = inMemoryFileIndex.leafFileStatuses.flatMap(
_.asInstanceOf[LocatedFileStatus].getBlockLocations)
assert(blockLocations.forall(_.getClass == classOf[BlockLocation]))
}
}
}
}
class FakeParentPathFileSystem extends RawLocalFileSystem {
override def getScheme: String = "mockFs"
override def getUri: URI = {
URI.create("mockFs://some-bucket")
}
}
class SpecialBlockLocationFileSystem extends RawLocalFileSystem {
class SpecialBlockLocation(
names: Array[String],
hosts: Array[String],
offset: Long,
length: Long)
extends BlockLocation(names, hosts, offset, length)
override def getFileBlockLocations(
file: FileStatus,
start: Long,
len: Long): Array[BlockLocation] = {
Array(new SpecialBlockLocation(Array("dummy"), Array("dummy"), 0L, file.getLen))
}
}
| ahnqirage/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileIndexSuite.scala | Scala | apache-2.0 | 11,391 |
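A small, hedged standalone illustration of the path-qualification behaviour several of the tests above rely on: Hadoop's FileSystem.makeQualified turns a bare local path into a scheme-qualified file: URI. The demo object and path are made up.

```scala
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

object QualifyPathDemo extends App {
  val conf = new Configuration()
  val path = new Path("/tmp/text.txt")        // unqualified: no scheme
  val fs: FileSystem = path.getFileSystem(conf)
  val qualified = fs.makeQualified(path)      // e.g. file:/tmp/text.txt on a local filesystem
  println(s"$path -> $qualified")
}
```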
package controllers
import java.util.UUID
import javax.inject.{Inject, Singleton}
import com.mohiva.play.silhouette.api.Environment
import com.mohiva.play.silhouette.impl.authenticators.CookieAuthenticator
import models._
import models.daos.PCMContainerDAO
import org.opencompare.api.java.{PCMContainer, PCMFactory}
import org.opencompare.api.java.impl.PCMFactoryImpl
import org.opencompare.api.java.impl.io.{KMFJSONExporter, KMFJSONLoader}
import org.opencompare.api.java.io.{CSVExporter, HTMLExporter}
import org.opencompare.api.java.extractor.CellContentInterpreter
import org.opencompare.io.wikipedia.io.{MediaWikiAPI, WikiTextExporter, WikiTextLoader, WikiTextTemplateProcessor}
import org.opencompare.io.wikipedia.parser.CellContentExtractor
import play.api.data.Form
import play.api.i18n.{I18nSupport, MessagesApi}
import play.api.libs.json._
import play.api.mvc.{Action, Controller}
import collection.JavaConversions._
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import scala.concurrent.Future
import JSONformating._
/**
* Created by gbecan on 08/01/15.
* Updated by smangin on 21/05/15
*/
@Singleton
class PCMAPI @Inject() (
val messagesApi: MessagesApi,
val env: Environment[User, CookieAuthenticator],
val pcmContainerDAO: PCMContainerDAO,
val pcmAPIUtils : PCMAPIUtils
) extends BaseController {
private val pcmFactory : PCMFactory = new PCMFactoryImpl()
private val mediaWikiAPI : MediaWikiAPI = new MediaWikiAPI("wikipedia.org")
private val wikitextTemplateProcessor : WikiTextTemplateProcessor= new WikiTextTemplateProcessor(mediaWikiAPI)
private val miner : WikiTextLoader= new WikiTextLoader(wikitextTemplateProcessor)
private val cellContentInterpreter : CellContentInterpreter = new CellContentInterpreter(pcmFactory)
def get(id : String) = Action.async {
val result = pcmContainerDAO.get(id)
result flatMap { dbPCM =>
if (dbPCM.isDefined) {
val futureJson = pcmAPIUtils.serializePCMContainer(dbPCM.get.pcmContainer.get)
futureJson map { json =>
Ok(json).withHeaders(
"Access-Control-Allow-Origin" -> "*"
)
}
} else {
Future.successful(NotFound(id))
}
}
}
def getnewjson(id : String) = Action.async {
val result = pcmContainerDAO.get(id)
result flatMap { dbPCM =>
if (dbPCM.isDefined) {
val pcmC = dbPCM.get.pcmContainer.get
val jsonContainer = Json.parse(PCMtoJSON.mkNewJSONFormatFromPCM(pcmC).export()).as[JsObject]
Future.successful(Ok(jsonContainer).withHeaders(
"Access-Control-Allow-Origin" -> "*"
))
} else {
Future.successful(NotFound(id))
}
}
}
def save(id : String) = Action { request =>
val json = request.body.asJson.get
val ipAddress = request.remoteAddress; // TODO : For future work !
val pcmContainers = pcmAPIUtils.parsePCMContainers(json)
if (pcmContainers.size == 1) {
val databasePCM = new DatabasePCM(Some(id), Some(pcmContainers.head))
Database.update(databasePCM)
Ok("")
} else {
BadRequest("multiple pcms not supported")
}
}
def create() = Action { request =>
val json = request.body.asJson.get
val pcmContainers = pcmAPIUtils.parsePCMContainers(json)
if (pcmContainers.size == 1) {
val id = Database.create(pcmContainers.get(0))
Ok(id)
} else {
BadRequest("multiple pcms not supported")
}
}
def remove(id : String) = SecuredAction(AdminAuthorization()) {
Database.remove(id)
Ok("")
}
def extractContent = Action { request =>
val json = request.body.asJson.get.as[JsObject]
val pcmType = json.value.get("type")
val rawContent = json.value.get("rawContent")
if (pcmType.isDefined && rawContent.isDefined) {
val pcmTypeString = pcmType.get.as[JsString].value
val rawContentString = rawContent.get.as[JsString].value
if (pcmTypeString == "wikipedia") {
val language = "en"
val wikitextContentExtractor = new CellContentExtractor(language, miner.preprocessor, wikitextTemplateProcessor, miner.parser)
val content = wikitextContentExtractor.extractCellContent(rawContentString)
Ok(content)
} else {
BadRequest("unknown type")
}
} else {
BadRequest("type and content must be defined")
}
}
def search(searchedString : String) = Action {
val results = Database.search(searchedString).toList
val jsonResults = JsArray(results.map(result =>
JsObject(Seq(
"id" -> JsString(result.id),
"name" -> JsString(result.name)
))
))
Ok(jsonResults)
}
}
| OpenCompare/OpenCompare | org.opencompare/play-app/app/controllers/PCMAPI.scala | Scala | apache-2.0 | 5,182 |
package com.codahale.jerkson.tests
import com.codahale.simplespec.Spec
import com.codahale.jerkson.Json._
import com.fasterxml.jackson.databind.node.IntNode
import com.fasterxml.jackson.databind.JsonNode
import org.junit.Test
class BasicTypeSupportSpec extends Spec {
class `A Byte` {
@Test def `generates a JSON int` = {
generate(15.toByte).must(be("15"))
}
@Test def `is parsable from a JSON int` = {
parse[Byte]("15").must(be(15))
}
}
class `A Short` {
@Test def `generates a JSON int` = {
generate(15.toShort).must(be("15"))
}
@Test def `is parsable from a JSON int` = {
parse[Short]("15").must(be(15))
}
}
class `An Int` {
@Test def `generates a JSON int` = {
generate(15).must(be("15"))
}
@Test def `is parsable from a JSON int` = {
parse[Int]("15").must(be(15))
}
}
class `A Long` {
@Test def `generates a JSON int` = {
generate(15L).must(be("15"))
}
@Test def `is parsable from a JSON int` = {
parse[Long]("15").must(be(15L))
}
}
class `A BigInt` {
@Test def `generates a JSON int` = {
generate(BigInt(15)).must(be("15"))
}
@Test def `is parsable from a JSON int` = {
parse[BigInt]("15").must(be(BigInt(15)))
}
@Test def `is parsable from a JSON string` = {
parse[BigInt]("\"15\"").must(be(BigInt(15)))
}
}
class `A Float` {
@Test def `generates a JSON float` = {
generate(15.1F).must(be("15.1"))
}
@Test def `is parsable from a JSON float` = {
parse[Float]("15.1").must(be(15.1F))
}
}
class `A Double` {
@Test def `generates a JSON float` = {
generate(15.1).must(be("15.1"))
}
@Test def `is parsable from a JSON float` = {
parse[Double]("15.1").must(be(15.1D))
}
}
class `A BigDecimal` {
@Test def `generates a JSON float` = {
generate(BigDecimal(15.5)).must(be("15.5"))
}
@Test def `is parsable from a JSON float` = {
parse[BigDecimal]("15.5").must(be(BigDecimal(15.5)))
}
@Test def `is parsable from a JSON int` = {
parse[BigDecimal]("15").must(be(BigDecimal(15.0)))
}
}
class `A String` {
@Test def `generates a JSON string` = {
generate("woo").must(be("\"woo\""))
}
@Test def `is parsable from a JSON string` = {
parse[String]("\"woo\"").must(be("woo"))
}
}
class `A StringBuilder` {
@Test def `generates a JSON string` = {
generate(new StringBuilder("foo")).must(be("\"foo\""))
}
@Test def `is parsable from a JSON string` = {
parse[StringBuilder]("\"foo\"").toString().must(be("foo"))
}
}
class `A null Object` {
@Test def `generates a JSON null` = {
generate[Object](null).must(be("null"))
}
@Test def `is parsable from a JSON null` = {
parse[Object]("null").must(be(not(notNull)))
}
}
class `A Boolean` {
@Test def `generates a JSON true` = {
generate(true).must(be("true"))
}
@Test def `generates a JSON false` = {
generate(false).must(be("false"))
}
@Test def `is parsable from a JSON true` = {
parse[Boolean]("true").must(be(true))
}
@Test def `is parsable from a JSON false` = {
parse[Boolean]("false").must(be(false))
}
}
class `A Some[Int]` {
@Test def `generates a JSON int` = {
generate(Some(12)).must(be("12"))
}
@Test def `is parsable from a JSON int as an Option[Int]` = {
parse[Option[Int]]("12").must(be(Some(12)))
}
}
class `A None` {
@Test def `generates a JSON null` = {
generate(None).must(be("null"))
}
@Test def `is parsable from a JSON null as an Option[Int]` = {
parse[Option[Int]]("null").must(be(None))
}
}
class `A Left[String]` {
@Test def `generates a JSON string` = {
generate(Left("woo")).must(be("\"woo\""))
}
@Test def `is parsable from a JSON string as an Either[String, Int]` = {
parse[Either[String, Int]]("\"woo\"").must(be(Left("woo")))
}
}
class `A Right[String]` {
@Test def `generates a JSON string` = {
generate(Right("woo")).must(be("\"woo\""))
}
@Test def `is parsable from a JSON string as an Either[Int, String]` = {
parse[Either[Int, String]]("\"woo\"").must(be(Right("woo")))
}
}
class `A JsonNode` {
@Test def `generates whatever the JsonNode is` = {
generate(new IntNode(2)).must(be("2"))
}
@Test def `is parsable from a JSON AST node` = {
parse[JsonNode]("2").must(be(new IntNode(2)))
}
@Test def `is parsable from a JSON AST node as a specific type` = {
parse[IntNode]("2").must(be(new IntNode(2)))
}
@Test def `is itself parsable` = {
parse[Int](new IntNode(2)).must(be(2))
}
}
class `An Array[Int]` {
@Test def `generates a JSON array of ints` = {
generate(Array(1, 2, 3)).must(be("[1,2,3]"))
}
@Test def `is parsable from a JSON array of ints` = {
parse[Array[Int]]("[1,2,3]").toList.must(be(List(1, 2, 3)))
}
@Test def `is parsable from an empty JSON array` = {
parse[Array[Int]]("[]").toList.must(be(List.empty))
}
}
}
|
codahale/jerkson
|
src/test/scala/com/codahale/jerkson/tests/BasicTypeSupportSpec.scala
|
Scala
|
mit
| 5,208 |
package sampler.example.abc.flockMortality.util
import play.api.libs.json.JsLookupResult.jsLookupResultToJsLookup
import play.api.libs.json.JsValue
import play.api.libs.json.JsValue.jsValueToJsLookup
import play.api.libs.json.Writes
import play.api.libs.json.Json
import play.api.libs.json.JsObject
import sampler._
case class Posterior(
beta: IndexedSeq[Double],
eta: IndexedSeq[Double],
gamma: IndexedSeq[Double],
delta: IndexedSeq[Double],
sigma: IndexedSeq[Double],
sigma2: IndexedSeq[Double],
offset: IndexedSeq[IndexedSeq[Int]]
)
object Posterior{
def apply(json: JsValue): Posterior = Posterior(
(json \\ "beta").as[List[Double]].toIndexedSeq,
(json \\ "eta").as[List[Double]].toIndexedSeq,
(json \\ "gamma").as[List[Double]].toIndexedSeq,
(json \\ "delta").as[List[Double]].toIndexedSeq,
(json \\ "sigma").as[List[Double]].toIndexedSeq,
(json \\ "sigma2").as[List[Double]].toIndexedSeq,
(json \\ "offset").as[List[List[Int]]].toIndexedSeq.map(i => i.toIndexedSeq)
)
implicit val posteriorWrites = new Writes[Posterior] {
def writes(data: Posterior) = { Json.obj(
"beta" -> data.beta,
"eta" -> data.eta,
"gamma" -> data.gamma,
"delta" -> data.delta,
"sigma" -> data.sigma,
"sigma2" -> data.sigma2,
"offset" -> data.offset
)
}
}
def fromSeq(paramSeq: IndexedSeq[Parameters]) = Posterior(
paramSeq.map(_.beta),
paramSeq.map(_.eta),
paramSeq.map(_.gamma),
paramSeq.map(_.delta),
paramSeq.map(_.sigma),
paramSeq.map(_.sigma2),
paramSeq.map(_.offset)
)
def getMarginalMedian(posterior: Posterior): Parameters = {
val medBeta = posterior.beta.toEmpirical.percentile(0.5)
val medEta = posterior.eta.toEmpirical.percentile(0.5)
val medGamma = posterior.gamma.toEmpirical.percentile(0.5)
val medDelta = posterior.delta.toEmpirical.percentile(0.5)
val medSigma = posterior.sigma.toEmpirical.percentile(0.5)
val medSigma2 = posterior.sigma2.toEmpirical.percentile(0.5)
val medOffset = posterior.offset.transpose.map(i =>
i.map(_.toDouble).toEmpirical.percentile(0.5).toInt)
// Create parameter object to use in model
Parameters(medBeta, medEta, medGamma, medDelta, medSigma, medSigma2, medOffset)
}
}
|
tearne/Sampler
|
sampler-examples/src/main/scala/sampler/example/abc/flockMortality/util/Posterior.scala
|
Scala
|
apache-2.0
| 2,342 |
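A minimal round-trip sketch for the Posterior helpers above; the object name and sample values are invented for illustration, and it assumes play-json is on the classpath as in that file.

import play.api.libs.json.Json
import sampler.example.abc.flockMortality.util.Posterior

object PosteriorRoundTripSketch extends App {
  val posterior = Posterior(
    beta   = IndexedSeq(0.1, 0.2),
    eta    = IndexedSeq(0.3, 0.4),
    gamma  = IndexedSeq(0.5, 0.6),
    delta  = IndexedSeq(0.7, 0.8),
    sigma  = IndexedSeq(0.9, 1.0),
    sigma2 = IndexedSeq(1.1, 1.2),
    offset = IndexedSeq(IndexedSeq(0, 1), IndexedSeq(2, 3))
  )
  // Serialise with the implicit Writes from the companion object, then rebuild
  // the value with the JsValue-based apply; the two should compare equal.
  val json     = Json.toJson(posterior)
  val restored = Posterior(Json.parse(Json.stringify(json)))
  println(restored == posterior) // expected: true
}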
package breeze.optimize
import breeze.math.{MutablizingAdaptor, CoordinateSpace, MutableCoordinateSpace}
import breeze.util.Implicits._
import breeze.optimize.FirstOrderMinimizer.OptParams
import breeze.linalg.DenseVector
import breeze.linalg.operators.{OpMulMatrix, BinaryOp}
import breeze.linalg.support.CanCopy
/**
*
* @author dlwh
*/
trait OptimizationPackage[Function, Vector] {
def minimize(fn: Function, init: Vector, options: OptimizationOption*):Vector
}
object OptimizationPackage {
class FirstOrderOptimizationPackage[DF, Vector]()(implicit coord: MutableCoordinateSpace[Vector, Double],
df: DF <:< DiffFunction[Vector]) extends OptimizationPackage[DF, Vector] {
def minimize(fn: DF, init: Vector, options: OptimizationOption*):Vector = {
options.foldLeft(OptParams())( (a,b) => b apply a).minimize(new CachedDiffFunction(fn)(coord.copy), init)
}
}
implicit def firstOrderPackage[DF, Vector](implicit coord: MutableCoordinateSpace[Vector, Double], df: DF <:< DiffFunction[Vector]) = new FirstOrderOptimizationPackage[DF, Vector]()
class SecondOrderOptimizationPackage[Vector, Hessian]()(implicit coord: MutableCoordinateSpace[Vector, Double],
mult: OpMulMatrix.Impl2[Hessian, Vector, Vector]) extends OptimizationPackage[SecondOrderFunction[Vector, Hessian], Vector] {
def minimize(fn: SecondOrderFunction[Vector, Hessian], init: Vector, options: OptimizationOption*):Vector = {
val params = options.foldLeft(OptParams())( (a,b) => b apply a)
if(params.useL1) throw new UnsupportedOperationException("Can't use L1 with second order optimizer right now")
val minimizer = new TruncatedNewtonMinimizer[Vector,Hessian](params.maxIterations, params.tolerance, params.regularization)
minimizer.minimize(fn, init)
}
}
implicit def secondOrderPackage[Vector, Hessian](implicit coord: MutableCoordinateSpace[Vector, Double],
mult: OpMulMatrix.Impl2[Hessian, Vector, Vector]) = new SecondOrderOptimizationPackage[Vector, Hessian]()
class FirstOrderStochasticOptimizationPackage[Vector]()(implicit coord: MutableCoordinateSpace[Vector, Double]) extends OptimizationPackage[StochasticDiffFunction[Vector], Vector] {
def minimize(fn: StochasticDiffFunction[Vector], init: Vector, options: OptimizationOption*):Vector = {
options.foldLeft(OptParams())( (a,b) => b apply a).iterations(fn, init).last.x
}
}
implicit def firstOrderStochasticPackage[Vector](implicit coord: MutableCoordinateSpace[Vector, Double]) = new FirstOrderStochasticOptimizationPackage[Vector]()
class FirstOrderBatchOptimizationPackage[Vector]()(implicit coord: MutableCoordinateSpace[Vector, Double]) extends OptimizationPackage[BatchDiffFunction[Vector], Vector] {
def minimize(fn: BatchDiffFunction[Vector], init: Vector, options: OptimizationOption*):Vector = {
options.foldLeft(OptParams())( (a,b) => b apply a).iterations(new CachedBatchDiffFunction(fn)(coord.copy), init).last.x
}
}
implicit def firstOrderBatchPackage[Vector](implicit coord: MutableCoordinateSpace[Vector, Double]) = new FirstOrderBatchOptimizationPackage[Vector]()
}
trait OptimizationPackageLowPriority {
class ImmutableFirstOrderOptimizationPackage[DF, Vector]()(implicit coord: CoordinateSpace[Vector, Double],
df: DF <:< DiffFunction[Vector]) extends OptimizationPackage[DF, Vector] {
def minimize(fn: DF, init: Vector, options: OptimizationOption*):Vector = {
val mut = MutablizingAdaptor.ensureMutable(coord)
import mut._
val wrapped = fn.throughLens[Wrapper]
val res = options.foldLeft(OptParams())( (a,b) => b apply a).minimize(new CachedDiffFunction(wrapped)(mutaVspace.copy), wrap(init))
unwrap(res)
}
}
implicit def imFirstOrderPackage[DF, Vector](implicit coord: CoordinateSpace[Vector, Double], df: DF <:< DiffFunction[Vector]) = new ImmutableFirstOrderOptimizationPackage[DF, Vector]()
}
|
eponvert/breeze
|
src/main/scala/breeze/optimize/OptimizationPackage.scala
|
Scala
|
apache-2.0
| 4,121 |
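For orientation, a small sketch of how these implicit packages are typically consumed; it assumes the breeze.optimize package object exposes a minimize entry point that resolves an OptimizationPackage implicitly, and the objective function here is invented for illustration.

import breeze.linalg.DenseVector
import breeze.optimize.{DiffFunction, minimize}

object MinimizeSketch extends App {
  // f(x) = ||x - 3||^2 with gradient 2 * (x - 3); the minimum sits at x = (3, ..., 3)
  val f = new DiffFunction[DenseVector[Double]] {
    def calculate(x: DenseVector[Double]): (Double, DenseVector[Double]) = {
      val diff = x - 3.0
      (diff dot diff, diff * 2.0)
    }
  }
  // firstOrderPackage above is resolved implicitly for DiffFunction over DenseVector[Double]
  val xmin = minimize(f, DenseVector.zeros[Double](5))
  println(xmin) // expected: approximately DenseVector(3.0, 3.0, 3.0, 3.0, 3.0)
}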
package dotty.tools
package dotc
package parsing
import core._, ast._
import Trees._
object desugarPackage extends DeSugarTest {
def test() = {
reset()
val start = System.nanoTime()
val startNodes = Trees.ntrees
parseDir("./src")
val ms1 = (System.nanoTime() - start)/1000000
val nodes = Trees.ntrees
val buf = parsedTrees map desugarTree
val ms2 = (System.nanoTime() - start)/1000000
println(s"$parsed files parsed in ${ms1}ms, ${nodes - startNodes} nodes desugared in ${ms2-ms1}ms, total trees created = ${Trees.ntrees - startNodes}")
ctx.reporter.printSummary(ctx)
}
def main(args: Array[String]): Unit = {
// parse("/Users/odersky/workspace/scala/src/compiler/scala/tools/nsc/doc/model/ModelFactoryTypeSupport.scala")
for (i <- 0 until 10) test()
}
}
|
som-snytt/dotty
|
compiler/test/dotty/tools/dotc/parsing/desugarPackage.scala
|
Scala
|
apache-2.0
| 814 |
/////////////////////////////////////////////////////////////////////
//// ////
//// WISHBONE revB.2 compliant I2C Master controller Top-level ////
//// ////
//// ////
//// Author: Richard Herveille ////
//// [email protected] ////
//// www.asics.ws ////
//// ////
//// Downloaded from: http://www.opencores.org/projects/i2c/ ////
//// ////
/////////////////////////////////////////////////////////////////////
//// ////
//// Copyright (C) 2001 Richard Herveille ////
//// [email protected] ////
//// ////
//// This source file may be used and distributed without ////
//// restriction provided that this copyright statement is not ////
//// removed from the file and that any derivative work contains ////
//// the original copyright notice and the associated disclaimer.////
//// ////
//// THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY ////
//// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED ////
//// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS ////
//// FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL THE AUTHOR ////
//// OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, ////
//// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ////
//// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE ////
//// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR ////
//// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ////
//// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ////
//// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT ////
//// OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ////
//// POSSIBILITY OF SUCH DAMAGE. ////
//// ////
/////////////////////////////////////////////////////////////////////
// This code was re-written in Chisel by SiFive, Inc.
// See LICENSE for license details.
// WISHBONE interface replaced by Tilelink2
package sifive.blocks.devices.i2c
import Chisel.{defaultCompileOptions => _, _}
import freechips.rocketchip.util.CompileOptions.NotStrictInferReset
import freechips.rocketchip.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.interrupts._
import freechips.rocketchip.prci._
import freechips.rocketchip.regmapper._
import freechips.rocketchip.subsystem._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.devices.tilelink._
import freechips.rocketchip.util.{AsyncResetRegVec, Majority}
import sifive.blocks.util._
case class I2CParams(
address: BigInt,
controlXType: ClockCrossingType = NoCrossing,
intXType: ClockCrossingType = NoCrossing) extends DeviceParams
class I2CPin extends Bundle {
val in = Bool(INPUT)
val out = Bool(OUTPUT)
val oe = Bool(OUTPUT)
}
class I2CPort extends Bundle {
val scl = new I2CPin
val sda = new I2CPin
}
abstract class I2C(busWidthBytes: Int, params: I2CParams)(implicit p: Parameters)
extends IORegisterRouter(
RegisterRouterParams(
name = "i2c",
compat = Seq("sifive,i2c0"),
base = params.address,
beatBytes = busWidthBytes),
new I2CPort)
with HasInterruptSources {
def nInterrupts = 1
lazy val module = new LazyModuleImp(this) {
val I2C_CMD_NOP = UInt(0x00)
val I2C_CMD_START = UInt(0x01)
val I2C_CMD_STOP = UInt(0x02)
val I2C_CMD_WRITE = UInt(0x04)
val I2C_CMD_READ = UInt(0x08)
class PrescalerBundle extends Bundle{
val hi = UInt(8.W)
val lo = UInt(8.W)
}
class ControlBundle extends Bundle{
val coreEn = Bool()
val intEn = Bool()
val reserved = UInt(6.W)
}
class CommandBundle extends Bundle{
val start = Bool()
val stop = Bool()
val read = Bool()
val write = Bool()
val ack = Bool()
val reserved = UInt(2.W)
val irqAck = Bool()
}
class StatusBundle extends Bundle{
      val receivedAck = Bool() // received acknowledge from slave
val busy = Bool()
val arbLost = Bool()
val reserved = UInt(3.W)
val transferInProgress = Bool()
val irqFlag = Bool()
}
// control state visible to SW/driver
val prescaler = Reg(init = (new PrescalerBundle).fromBits(0xFFFF.U))
val control = Reg(init = (new ControlBundle).fromBits(0.U))
val transmitData = Reg(init = UInt(0, 8.W))
val receivedData = Reg(init = UInt(0, 8.W))
val cmd = Reg(init = (new CommandBundle).fromBits(0.U))
val status = Reg(init = (new StatusBundle).fromBits(0.U))
//////// Bit level ////////
port.scl.out := false.B // i2c clock line output
port.sda.out := false.B // i2c data line output
// filter SCL and SDA signals; (attempt to) remove glitches
val filterCnt = Reg(init = UInt(0, 14.W))
when ( !control.coreEn ) {
filterCnt := 0.U
} .elsewhen (!(filterCnt.orR)) {
filterCnt := Cat(prescaler.hi, prescaler.lo) >> 2 //16x I2C bus frequency
} .otherwise {
filterCnt := filterCnt - 1.U
}
val fSCL = Reg(init = UInt(0x7, 3.W))
val fSDA = Reg(init = UInt(0x7, 3.W))
when (!(filterCnt.orR)) {
fSCL := Cat(fSCL, port.scl.in)
fSDA := Cat(fSDA, port.sda.in)
}
val sSCL = Reg(init = true.B, next = Majority(fSCL))
val sSDA = Reg(init = true.B, next = Majority(fSDA))
val dSCL = Reg(init = true.B, next = sSCL)
val dSDA = Reg(init = true.B, next = sSDA)
val dSCLOen = Reg(next = port.scl.oe) // delayed scl_oen
// detect start condition => detect falling edge on SDA while SCL is high
// detect stop condition => detect rising edge on SDA while SCL is high
val startCond = Reg(init = false.B, next = !sSDA && dSDA && sSCL)
val stopCond = Reg(init = false.B, next = sSDA && !dSDA && sSCL)
// master drives SCL high, but another master pulls it low
// master start counting down its low cycle now (clock synchronization)
val sclSync = dSCL && !sSCL && port.scl.oe
// slave_wait is asserted when master wants to drive SCL high, but the slave pulls it low
// slave_wait remains asserted until the slave releases SCL
val slaveWait = Reg(init = false.B)
slaveWait := (port.scl.oe && !dSCLOen && !sSCL) || (slaveWait && !sSCL)
val clkEn = Reg(init = true.B) // clock generation signals
val cnt = Reg(init = UInt(0, 16.W)) // clock divider counter (synthesis)
// generate clk enable signal
when (!(cnt.orR) || !control.coreEn || sclSync ) {
cnt := Cat(prescaler.hi, prescaler.lo)
clkEn := true.B
}
.elsewhen (slaveWait) {
clkEn := false.B
}
.otherwise {
cnt := cnt - 1.U
clkEn := false.B
}
val sclOen = Reg(init = true.B)
port.scl.oe := !sclOen
val sdaOen = Reg(init = true.B)
port.sda.oe := !sdaOen
val sdaChk = Reg(init = false.B) // check SDA output (Multi-master arbitration)
val transmitBit = Reg(init = false.B)
val receivedBit = Reg(Bool())
when (sSCL && !dSCL) {
receivedBit := sSDA
}
val bitCmd = Reg(init = UInt(0, 4.W)) // command (from byte controller)
val bitCmdStop = Reg(init = false.B)
when (clkEn) {
bitCmdStop := bitCmd === I2C_CMD_STOP
}
val bitCmdAck = Reg(init = false.B)
val (s_bit_idle ::
s_bit_start_a :: s_bit_start_b :: s_bit_start_c :: s_bit_start_d :: s_bit_start_e ::
s_bit_stop_a :: s_bit_stop_b :: s_bit_stop_c :: s_bit_stop_d ::
s_bit_rd_a :: s_bit_rd_b :: s_bit_rd_c :: s_bit_rd_d ::
s_bit_wr_a :: s_bit_wr_b :: s_bit_wr_c :: s_bit_wr_d :: Nil) = Enum(UInt(), 18)
val bitState = Reg(init = s_bit_idle)
val arbLost = Reg(init = false.B, next = (sdaChk && !sSDA && sdaOen) | ((bitState =/= s_bit_idle) && stopCond && !bitCmdStop))
// bit FSM
when (arbLost) {
bitState := s_bit_idle
bitCmdAck := false.B
sclOen := true.B
sdaOen := true.B
sdaChk := false.B
}
.otherwise {
bitCmdAck := false.B
when (clkEn) {
switch (bitState) {
is (s_bit_idle) {
switch (bitCmd) {
is (I2C_CMD_START) { bitState := s_bit_start_a }
is (I2C_CMD_STOP) { bitState := s_bit_stop_a }
is (I2C_CMD_WRITE) { bitState := s_bit_wr_a }
is (I2C_CMD_READ) { bitState := s_bit_rd_a }
}
sdaChk := false.B
}
is (s_bit_start_a) {
bitState := s_bit_start_b
sclOen := sclOen
sdaOen := true.B
sdaChk := false.B
}
is (s_bit_start_b) {
bitState := s_bit_start_c
sclOen := true.B
sdaOen := true.B
sdaChk := false.B
}
is (s_bit_start_c) {
bitState := s_bit_start_d
sclOen := true.B
sdaOen := false.B
sdaChk := false.B
}
is (s_bit_start_d) {
bitState := s_bit_start_e
sclOen := true.B
sdaOen := false.B
sdaChk := false.B
}
is (s_bit_start_e) {
bitState := s_bit_idle
bitCmdAck := true.B
sclOen := false.B
sdaOen := false.B
sdaChk := false.B
}
is (s_bit_stop_a) {
bitState := s_bit_stop_b
sclOen := false.B
sdaOen := false.B
sdaChk := false.B
}
is (s_bit_stop_b) {
bitState := s_bit_stop_c
sclOen := true.B
sdaOen := false.B
sdaChk := false.B
}
is (s_bit_stop_c) {
bitState := s_bit_stop_d
sclOen := true.B
sdaOen := false.B
sdaChk := false.B
}
is (s_bit_stop_d) {
bitState := s_bit_idle
bitCmdAck := true.B
sclOen := true.B
sdaOen := true.B
sdaChk := false.B
}
is (s_bit_rd_a) {
bitState := s_bit_rd_b
sclOen := false.B
sdaOen := true.B
sdaChk := false.B
}
is (s_bit_rd_b) {
bitState := s_bit_rd_c
sclOen := true.B
sdaOen := true.B
sdaChk := false.B
}
is (s_bit_rd_c) {
bitState := s_bit_rd_d
sclOen := true.B
sdaOen := true.B
sdaChk := false.B
}
is (s_bit_rd_d) {
bitState := s_bit_idle
bitCmdAck := true.B
sclOen := false.B
sdaOen := true.B
sdaChk := false.B
}
is (s_bit_wr_a) {
bitState := s_bit_wr_b
sclOen := false.B
sdaOen := transmitBit
sdaChk := false.B
}
is (s_bit_wr_b) {
bitState := s_bit_wr_c
sclOen := true.B
sdaOen := transmitBit
sdaChk := false.B
}
is (s_bit_wr_c) {
bitState := s_bit_wr_d
sclOen := true.B
sdaOen := transmitBit
sdaChk := true.B
}
is (s_bit_wr_d) {
bitState := s_bit_idle
bitCmdAck := true.B
sclOen := false.B
sdaOen := transmitBit
sdaChk := false.B
}
}
}
}
//////// Byte level ///////
val load = Reg(init = false.B) // load shift register
val shift = Reg(init = false.B) // shift shift register
val cmdAck = Reg(init = false.B) // also done
val receivedAck = Reg(init = false.B) // from I2C slave
val go = (cmd.read | cmd.write | cmd.stop) & !cmdAck
val bitCnt = Reg(init = UInt(0, 3.W))
when (load) {
bitCnt := 0x7.U
}
.elsewhen (shift) {
bitCnt := bitCnt - 1.U
}
val bitCntDone = !(bitCnt.orR)
// receivedData is used as shift register directly
when (load) {
receivedData := transmitData
}
.elsewhen (shift) {
receivedData := Cat(receivedData, receivedBit)
}
val (s_byte_idle :: s_byte_start :: s_byte_read :: s_byte_write :: s_byte_ack :: s_byte_stop :: Nil) = Enum(UInt(), 6)
val byteState = Reg(init = s_byte_idle)
when (arbLost) {
bitCmd := I2C_CMD_NOP
transmitBit := false.B
shift := false.B
load := false.B
cmdAck := false.B
byteState := s_byte_idle
receivedAck := false.B
}
.otherwise {
transmitBit := receivedData(7)
shift := false.B
load := false.B
cmdAck := false.B
switch (byteState) {
is (s_byte_idle) {
when (go) {
when (cmd.start) {
byteState := s_byte_start
bitCmd := I2C_CMD_START
}
.elsewhen (cmd.read) {
byteState := s_byte_read
bitCmd := I2C_CMD_READ
}
.elsewhen (cmd.write) {
byteState := s_byte_write
bitCmd := I2C_CMD_WRITE
}
.otherwise { // stop
byteState := s_byte_stop
bitCmd := I2C_CMD_STOP
}
load := true.B
}
}
is (s_byte_start) {
when (bitCmdAck) {
when (cmd.read) {
byteState := s_byte_read
bitCmd := I2C_CMD_READ
}
.otherwise {
byteState := s_byte_write
bitCmd := I2C_CMD_WRITE
}
load := true.B
}
}
is (s_byte_write) {
when (bitCmdAck) {
when (bitCntDone) {
byteState := s_byte_ack
bitCmd := I2C_CMD_READ
}
.otherwise {
byteState := s_byte_write
bitCmd := I2C_CMD_WRITE
shift := true.B
}
}
}
is (s_byte_read) {
when (bitCmdAck) {
when (bitCntDone) {
byteState := s_byte_ack
bitCmd := I2C_CMD_WRITE
}
.otherwise {
byteState := s_byte_read
bitCmd := I2C_CMD_READ
}
shift := true.B
transmitBit := cmd.ack
}
}
is (s_byte_ack) {
when (bitCmdAck) {
when (cmd.stop) {
byteState := s_byte_stop
bitCmd := I2C_CMD_STOP
}
.otherwise {
byteState := s_byte_idle
bitCmd := I2C_CMD_NOP
// generate command acknowledge signal
cmdAck := true.B
}
// assign ack_out output to bit_controller_rxd (contains last received bit)
receivedAck := receivedBit
transmitBit := true.B
}
.otherwise {
transmitBit := cmd.ack
}
}
is (s_byte_stop) {
when (bitCmdAck) {
byteState := s_byte_idle
bitCmd := I2C_CMD_NOP
// assign ack_out output to bit_controller_rxd (contains last received bit)
cmdAck := true.B
}
}
}
}
//////// Top level ////////
// hack: b/c the same register offset is used to write cmd and read status
val nextCmd = Wire(UInt(8.W))
cmd := (new CommandBundle).fromBits(nextCmd)
nextCmd := cmd.asUInt & 0xFE.U // clear IRQ_ACK bit (essentially 1 cycle pulse b/c it is overwritten by regmap below)
// Note: This wins over the regmap update of nextCmd (even if something tries to write them to 1, these values take priority).
when (cmdAck || arbLost) {
cmd.start := false.B // clear command bits when done
      cmd.stop := false.B // or when arbitration is lost
cmd.read := false.B
cmd.write := false.B
}
status.receivedAck := receivedAck
when (stopCond) {
status.busy := false.B
}
.elsewhen (startCond) {
status.busy := true.B
}
when (arbLost) {
status.arbLost := true.B
}
.elsewhen (cmd.start) {
status.arbLost := false.B
}
status.transferInProgress := cmd.read || cmd.write
status.irqFlag := (cmdAck || arbLost || status.irqFlag) && !cmd.irqAck // interrupt request flag is always generated
val statusReadReady = Reg(init = true.B)
when (cmdAck || arbLost) { // => cmd.read or cmd.write deassert 1 cycle later => transferInProgress deassert 2 cycles later
statusReadReady := false.B // do not allow status read if status.transferInProgress is going to change
}
.elsewhen (!statusReadReady) {
statusReadReady := true.B
}
// statusReadReady,
regmap(
I2CCtrlRegs.prescaler_lo -> Seq(RegField(8, prescaler.lo,
RegFieldDesc("prescaler_lo","I2C prescaler, low byte", reset=Some(0)))),
I2CCtrlRegs.prescaler_hi -> Seq(RegField(8, prescaler.hi,
RegFieldDesc("prescaler_hi","I2C prescaler, high byte", reset=Some(0)))),
I2CCtrlRegs.control -> RegFieldGroup("control",
Some("Control"),
control.elements.map{
case(name, e) => RegField(e.getWidth,
e.asInstanceOf[UInt],
RegFieldDesc(name, s"Sets the ${name}" ,
reset=Some(0)))
}.toSeq),
I2CCtrlRegs.data -> Seq(RegField(8, r = RegReadFn(receivedData), w = RegWriteFn(transmitData),
RegFieldDesc("data","I2C tx and rx data", volatile=true, reset=Some(0)))),
I2CCtrlRegs.cmd_status -> Seq(RegField(8, r = RegReadFn{ ready =>
(statusReadReady, status.asUInt)
},
w = RegWriteFn((valid, data) => {
when (valid) {
statusReadReady := false.B
nextCmd := data
}
true.B
}),
RegFieldDesc("cmd_status","On write, update I2C command. On Read, report I2C status", volatile=true)))
)
// tie off unused bits
control.reserved := 0.U
cmd.reserved := 0.U
status.reserved := 0.U
interrupts(0) := status.irqFlag & control.intEn
}}
class TLI2C(busWidthBytes: Int, params: I2CParams)(implicit p: Parameters)
extends I2C(busWidthBytes, params) with HasTLControlRegMap
case class I2CLocated(loc: HierarchicalLocation) extends Field[Seq[I2CAttachParams]](Nil)
case class I2CAttachParams(
device: I2CParams,
controlWhere: TLBusWrapperLocation = PBUS,
blockerAddr: Option[BigInt] = None,
controlXType: ClockCrossingType = NoCrossing,
intXType: ClockCrossingType = NoCrossing) extends DeviceAttachParams
{
def attachTo(where: Attachable)(implicit p: Parameters): TLI2C = where {
val name = s"i2c_${I2C.nextId()}"
val tlbus = where.locateTLBusWrapper(controlWhere)
val i2cClockDomainWrapper = LazyModule(new ClockSinkDomain(take = None))
val i2c = i2cClockDomainWrapper { LazyModule(new TLI2C(tlbus.beatBytes, device)) }
i2c.suggestName(name)
tlbus.coupleTo(s"device_named_$name") { bus =>
val blockerOpt = blockerAddr.map { a =>
val blocker = LazyModule(new TLClockBlocker(BasicBusBlockerParams(a, tlbus.beatBytes, tlbus.beatBytes)))
tlbus.coupleTo(s"bus_blocker_for_$name") { blocker.controlNode := TLFragmenter(tlbus) := _ }
blocker
}
i2cClockDomainWrapper.clockNode := (controlXType match {
case _: SynchronousCrossing =>
tlbus.dtsClk.map(_.bind(i2c.device))
tlbus.fixedClockNode
case _: RationalCrossing =>
tlbus.clockNode
case _: AsynchronousCrossing =>
val i2cClockGroup = ClockGroup()
i2cClockGroup := where.asyncClockGroupsNode
blockerOpt.map { _.clockNode := i2cClockGroup } .getOrElse { i2cClockGroup }
})
(i2c.controlXing(controlXType)
:= TLFragmenter(tlbus)
:= blockerOpt.map { _.node := bus } .getOrElse { bus })
}
(intXType match {
case _: SynchronousCrossing => where.ibus.fromSync
case _: RationalCrossing => where.ibus.fromRational
case _: AsynchronousCrossing => where.ibus.fromAsync
}) := i2c.intXing(intXType)
i2c
}
}
object I2C {
val nextId = { var i = -1; () => { i += 1; i} }
def makePort(node: BundleBridgeSource[I2CPort], name: String)(implicit p: Parameters): ModuleValue[I2CPort] = {
val i2cNode = node.makeSink()
InModuleBody { i2cNode.makeIO()(ValName(name)) }
}
}
|
sifive/sifive-blocks
|
src/main/scala/devices/i2c/I2C.scala
|
Scala
|
apache-2.0
| 21,714 |
package org.jetbrains.plugins.scala
package util
import com.intellij.openapi.editor.Document
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.text.StringUtil
import com.intellij.psi.codeStyle.CodeStyleSettingsManager
import com.intellij.psi.{PsiDocumentManager, PsiElement}
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScInterpolatedStringLiteral, ScLiteral, ScReferenceElement}
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import org.jetbrains.plugins.scala.lang.psi.types.StdType
import scala.collection.mutable.ArrayBuffer
/**
* Nikolay.Tropin
* 2014-03-13
*/
object MultilineStringUtil {
  val multilineQuotes = "\"\"\""
val multilineQuotesLength = multilineQuotes.length
def inMultilineString(element: PsiElement): Boolean = {
if (element == null) return false
element.getNode.getElementType match {
case ScalaTokenTypes.tMULTILINE_STRING | ScalaTokenTypes.tINTERPOLATED_MULTILINE_STRING => true
case ScalaTokenTypes.tINTERPOLATED_STRING_END | ScalaTokenTypes.tINTERPOLATED_STRING_INJECTION |
ScalaTokenTypes.tINTERPOLATED_STRING_ESCAPE =>
element.getParent match {
case lit: ScLiteral if lit.isMultiLineString => true
case _ => false
}
case _ => false
}
}
def needAddMethodCallToMLString(stringElement: PsiElement, methodName: String): Boolean = {
var parent = stringElement.getParent
do {
parent match {
case ref: ScReferenceElement => //if (ref.nameId.getText == methodName) return false
case l: ScLiteral => if (!l.isMultiLineString) return false
case i: ScInfixExpr => if (i.operation.getText == methodName) return false
case call: ScMethodCall =>
if (Option(call.getEffectiveInvokedExpr).forall {
case expr: ScExpression => expr.getText endsWith "." + methodName
case _ => false
}) return false
case _: ScParenthesisedExpr =>
case _ => return true
}
parent = parent.getParent
} while (parent != null)
true
}
def needAddStripMargin(element: PsiElement, marginChar: String): Boolean = {
    def hasMarginChars(element: PsiElement) = element.getText.replace("\r", "").split("\n[ \t]*\\|").length > 1
findAllMethodCallsOnMLString(element, "stripMargin").isEmpty && !hasMarginChars(element)
}
def needAddByType(literal: ScLiteral): Boolean = literal match {
case interpolated: ScInterpolatedStringLiteral => interpolated.reference match {
case Some(ref: ScReferenceExpression) =>
ref.resolve() match {
case funDef: ScFunction =>
val tpe = funDef.returnType
tpe.exists(scType => scType.canonicalText.endsWith("java.lang.String") || scType.canonicalText.endsWith("scala.Predef.String"))
case _ => true
}
case _ => true
}
case _ => true
}
def insertStripMargin(document: Document, literal: ScLiteral, marginChar: Char) {
if (needAddStripMargin(literal, "" + marginChar)) {
document.insertString(literal.getTextRange.getEndOffset,
        if (marginChar == '|') ".stripMargin" else ".stripMargin(\'" + marginChar + "\')")
}
}
def getMarginChar(element: PsiElement): Char = {
val calls = findAllMethodCallsOnMLString(element, "stripMargin")
val defaultMargin = CodeStyleSettingsManager.getInstance(element.getProject).getCurrentSettings.
getCustomSettings(classOf[ScalaCodeStyleSettings]).MARGIN_CHAR
if (calls.isEmpty) return defaultMargin
calls.apply(0).headOption match {
case Some(ScLiteral(c: Character)) => c
case _ => defaultMargin
}
}
def findAllMethodCallsOnMLString(stringElement: PsiElement, methodName: String): Array[Array[ScExpression]] = {
val calls = new ArrayBuffer[Array[ScExpression]]()
def callsArray = calls.toArray
var prevParent: PsiElement = findParentMLString(stringElement).getOrElse(return Array.empty)
var parent = prevParent.getParent
do {
parent match {
case lit: ScLiteral => if (!lit.isMultiLineString) return Array.empty
case inf: ScInfixExpr =>
if (inf.operation.getText == methodName){
if (prevParent != parent.getFirstChild) return callsArray
calls += Array(inf.rOp)
}
case call: ScMethodCall =>
call.getEffectiveInvokedExpr match {
case ref: ScReferenceExpression if ref.refName == methodName => calls += call.args.exprsArray
case _ =>
}
case exp: ScReferenceExpression =>
if (!exp.getParent.isInstanceOf[ScMethodCall]) {
calls += Array[ScExpression]()
}
case _: ScParenthesisedExpr =>
case _ => return callsArray
}
prevParent = parent
parent = parent.getParent
} while (parent != null)
callsArray
}
def findParentMLString(element: PsiElement): Option[ScLiteral] = {
(Iterator(element) ++ element.parentsInFile).collect {
case lit: ScLiteral if lit.isMultiLineString => lit
}.toStream.headOption
}
def isMLString(element: PsiElement): Boolean = element match {
case lit: ScLiteral if lit.isMultiLineString => true
case _ => false
}
def interpolatorPrefixLength(literal: ScLiteral) = interpolatorPrefix(literal).length
def interpolatorPrefix(literal: ScLiteral) = literal match {
case isl: ScInterpolatedStringLiteral if isl.reference.isDefined => isl.reference.get.refName
case _ => ""
}
def containsArgs(currentArgs: Array[Array[ScExpression]], argsToFind: String*): Boolean = {
val myArgs = argsToFind.sorted
for (arg <- currentArgs) {
val argsString = arg.map(_.getText).sorted
if (myArgs.sameElements(argsString) || myArgs.reverse.sameElements(argsString)) return true
}
false
}
def addMarginsAndFormatMLString(element: PsiElement, document: Document) {
val settings = new MultilineStringSettings(element.getProject)
if (settings.supportLevel != ScalaCodeStyleSettings.MULTILINE_STRING_ALL) return
def insertIndent(lineNumber: Int, indent: Int, marginChar: Option[Char]) {
val lineStart = document.getLineStartOffset(lineNumber)
document.insertString(lineStart, settings.getSmartSpaces(indent) + marginChar.getOrElse(""))
}
PsiDocumentManager.getInstance(element.getProject).doPostponedOperationsAndUnblockDocument(document)
element match {
case literal: ScLiteral if literal.isMultiLineString =>
val firstMLQuote = interpolatorPrefix(literal) + multilineQuotes
val literalOffsets = Seq(literal.getTextRange.getStartOffset, literal.getTextRange.getEndOffset)
val Seq(startLineNumber, endLineNumber) = literalOffsets.map(document.getLineNumber)
val literalStart = literalOffsets(0)
val (startLineOffset, startLineEndOffset) = (document.getLineStartOffset(startLineNumber), document.getLineEndOffset(startLineNumber))
val startsOnNewLine = document.getText.substring(startLineOffset, startLineEndOffset).trim.startsWith(firstMLQuote)
val multipleLines = endLineNumber != startLineNumber
val needNewLineBefore = settings.quotesOnNewLine && multipleLines && !startsOnNewLine
val marginChar = getMarginChar(literal)
extensions.inWriteAction {
if (multipleLines) insertStripMargin(document, literal, marginChar)
if (!needNewLineBefore) {
val quotesIndent = settings.getSmartLength(document.getText.substring(startLineOffset, literalStart))
val marginIndent = quotesIndent + interpolatorPrefixLength(literal) + settings.marginIndent
for (lineNumber <- startLineNumber + 1 to endLineNumber) {
insertIndent(lineNumber, marginIndent, Some(marginChar))
}
} else {
val oldIndent = settings.prefixLength(document.getText.substring(startLineOffset, literalStart))
val quotesIndent = oldIndent + settings.regularIndent
val marginIndent = quotesIndent + interpolatorPrefixLength(literal) + settings.marginIndent
for (lineNumber <- startLineNumber + 1 to endLineNumber) {
insertIndent(lineNumber, marginIndent, Some(marginChar))
}
            document.insertString(literalStart, "\n")
insertIndent(startLineNumber + 1, quotesIndent, None)
}
}
case something => throw new IllegalStateException(s"Need multiline string literal, but get: ${something.getText}")
}
}
}
class MultilineStringSettings(project: Project) {
private val settings = CodeStyleSettingsManager.getInstance(project).getCurrentSettings
private val scalaSettings: ScalaCodeStyleSettings = ScalaCodeStyleSettings.getInstance(project)
val defaultMarginChar = settings.getCustomSettings(classOf[ScalaCodeStyleSettings]).MARGIN_CHAR
val useTabs = settings.useTabCharacter(ScalaFileType.SCALA_FILE_TYPE)
val tabSize = settings.getTabSize(ScalaFileType.SCALA_FILE_TYPE)
val regularIndent = settings.getIndentOptions(ScalaFileType.SCALA_FILE_TYPE).INDENT_SIZE
val marginIndent = scalaSettings.MULTI_LINE_STRING_MARGIN_INDENT
val supportLevel = scalaSettings.MULTILINE_STRING_SUPORT
val quotesOnNewLine = scalaSettings.MULTI_LINE_QUOTES_ON_NEW_LINE
def selectBySettings[T](ifIndent: => T)(ifAll: => T): T = {
scalaSettings.MULTILINE_STRING_SUPORT match {
case ScalaCodeStyleSettings.MULTILINE_STRING_QUOTES_AND_INDENT => ifIndent
case ScalaCodeStyleSettings.MULTILINE_STRING_ALL => ifAll
}
}
def getSmartSpaces(count: Int) = if (useTabs) {
    StringUtil.repeat("\t", count/tabSize) + StringUtil.repeat(" ", count%tabSize)
} else {
StringUtil.repeat(" ", count)
}
  def getSmartLength(line: String) = if (useTabs) line.length + line.count(_ == '\t')*(tabSize - 1) else line.length
  def prefixLength(line: String) = if (useTabs) {
    val tabsCount = line prefixLength (_ == '\t')
tabsCount*tabSize + line.substring(tabsCount).prefixLength(_ == ' ')
} else {
line prefixLength (_ == ' ')
}
def getPrefix(line: String) = getSmartSpaces(prefixLength(line))
}
|
SergeevPavel/intellij-scala
|
src/org/jetbrains/plugins/scala/util/MultilineStringUtil.scala
|
Scala
|
apache-2.0
| 10,468 |
package com.lynbrookrobotics.potassium.commons.position
import com.lynbrookrobotics.potassium.{Component, Signal}
import com.lynbrookrobotics.potassium.streams.Stream
import com.lynbrookrobotics.potassium.control.{PIDF, PIDFConfig}
import com.lynbrookrobotics.potassium.tasks.{ContinuousTask, FiniteTask}
import squants.Quantity
import squants.time.{TimeDerivative, TimeIntegral}
trait PositionProperties[S <: Quantity[S], SWithD <: Quantity[SWithD] with TimeIntegral[D], SWithI <: Quantity[SWithI] with TimeDerivative[
I
], D <: Quantity[D] with TimeDerivative[SWithD], I <: Quantity[I] with TimeIntegral[SWithI], U <: Quantity[U]] {
def positionGains: PIDFConfig[S, SWithD, SWithI, D, I, U]
}
trait PositionHardware[S <: Quantity[S]] {
def position: Stream[S]
}
abstract class Position[S <: Quantity[S], SWithD <: Quantity[SWithD] with TimeIntegral[D], SWithI <: Quantity[SWithI] with TimeDerivative[
I
], D <: Quantity[D] with TimeDerivative[SWithD], I <: Quantity[I] with TimeIntegral[SWithI], U <: Quantity[U]](
implicit exD: S => SWithD,
exI: S => SWithI
) {
type Properties <: PositionProperties[S, SWithD, SWithI, D, I, U]
type Hardware <: PositionHardware[S]
object positionControllers {
def positionControl(
target: S
)(implicit properties: Signal[Properties], hardware: Hardware): (Stream[S], Stream[U]) = {
val error = hardware.position.map(target - _)
(error, PIDF.pidf(hardware.position, hardware.position.mapToConstant(target), properties.map(_.positionGains)))
}
}
object positionTasks {
class MoveToPosition(pos: S, tolerance: S)(implicit properties: Signal[Properties], hardware: Hardware, comp: Comp)
extends FiniteTask {
override def onStart(): Unit = {
val (error, control) = positionControllers.positionControl(pos)
comp.setController(control.withCheckZipped(error) { error =>
if (error.abs < tolerance) {
finished()
}
})
}
override def onEnd(): Unit = {
comp.resetToDefault()
}
}
class HoldPosition(pos: S)(implicit properties: Signal[Properties], hardware: Hardware, comp: Comp)
extends ContinuousTask {
override def onStart(): Unit = {
val (_, control) = positionControllers.positionControl(pos)
comp.setController(control)
}
override def onEnd(): Unit = {
comp.resetToDefault()
}
}
}
type Comp <: Component[U]
}
|
Team846/potassium
|
commons/src/main/scala/com/lynbrookrobotics/potassium/commons/position/Position.scala
|
Scala
|
mit
| 2,472 |
/*                                                                      *\
** Squants                                                              **
**                                                                      **
** Scala Quantities and Units of Measure Library and DSL                **
** (c) 2013-2015, Gary Keorkunian                                       **
**                                                                      **
\*                                                                      */
package squants.photo
import squants._
import squants.time.{ Seconds, TimeIntegral }
/**
* @author garyKeorkunian
* @since 0.1
*
* @param value value in [[squants.photo.LumenSeconds]]
*/
final class LuminousEnergy private (val value: Double, val unit: LuminousEnergyUnit)
extends Quantity[LuminousEnergy]
with TimeIntegral[LuminousFlux] {
def dimension = LuminousEnergy
protected def timeDerived = Lumens(toLumenSeconds)
protected[squants] def time = Seconds(1)
def toLumenSeconds = to(LumenSeconds)
}
object LuminousEnergy extends Dimension[LuminousEnergy] {
private[photo] def apply[A](n: A, unit: LuminousEnergyUnit)(implicit num: Numeric[A]) = new LuminousEnergy(num.toDouble(n), unit)
def apply = parse _
def name = "LuminousEnergy"
def primaryUnit = LumenSeconds
def siUnit = LumenSeconds
def units = Set(LumenSeconds)
}
trait LuminousEnergyUnit extends UnitOfMeasure[LuminousEnergy] with UnitConverter {
def apply[A](n: A)(implicit num: Numeric[A]) = LuminousEnergy(num.toDouble(n), this)
}
object LumenSeconds extends LuminousEnergyUnit with PrimaryUnit with SiUnit {
  val symbol = "lm⋅s"
}
object LuminousEnergyConversions {
lazy val lumenSecond = LumenSeconds(1)
implicit class LuminousEnergyConversions[A](n: A)(implicit num: Numeric[A]) {
def lumenSeconds = LumenSeconds(n)
}
implicit object LuminousEnergyNumeric extends AbstractQuantityNumeric[LuminousEnergy](LuminousEnergy.primaryUnit)
}
|
rmihael/squants
|
shared/src/main/scala/squants/photo/LuminousEnergy.scala
|
Scala
|
apache-2.0
| 1,993 |
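A short usage sketch based only on the definitions above and squants' TimeIntegral contract; the printed values are what the arithmetic implies, not verified output.

import squants.photo.LumenSeconds
import squants.photo.LuminousEnergyConversions._
import squants.time.Seconds

object LuminousEnergySketch extends App {
  val energy = 10.lumenSeconds        // built via the implicit conversion class above
  println(energy.toLumenSeconds)      // 10.0
  println(energy + LumenSeconds(2.5)) // 12.5 lm⋅s
  println(energy / Seconds(2))        // time derivative: 5.0 lm, a LuminousFlux
}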
/*
* Copyright 2013 agwlvssainokuni
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models
import java.sql.Connection
import java.util.Date
import anorm._
import anorm.SQL
import anorm.SqlParser._
import anorm.sqlToSimple
import anorm.toParameterValue
import play.api.Play.current
import play.api.cache.Cache
import play.api.libs._
case class Member(email: String, nickname: String, birthday: Option[Date]) {
var id: Pk[Long] = NotAssigned
}
object Member {
val parser = {
long("members.id") ~ str("members.email") ~ str("members.nickname") ~ (date("members.birthday")?) map {
case id ~ email ~ nickname ~ birthday =>
val entity = Member(email, nickname, birthday)
entity.id = Id(id)
entity
}
}
def count()(implicit c: Connection): Long =
SQL("""
SELECT COUNT(*) FROM members
""").single(scalar[Long])
def list(pageNo: Long, pageSize: Long)(implicit c: Connection): Seq[Member] =
SQL("""
SELECT id, email, nickname, birthday FROM members
ORDER BY id
LIMIT {limit} OFFSET {offset}
""").on(
'limit -> pageSize,
'offset -> pageSize * pageNo).list(parser)
def find(id: Long)(implicit c: Connection): Option[Member] =
SQL("""
SELECT id, email, nickname, birthday FROM members
WHERE
id = {id}
""").on(
'id -> id).singleOpt(parser)
def get(id: Long)(implicit c: Connection): Option[Member] =
Cache.getOrElse(cacheName(id)) {
find(id)
}
def create(member: Member)(implicit c: Connection): Option[Long] =
SQL("""
INSERT INTO members (
email,
nickname,
birthday,
passwd,
updated_at
) VALUES (
{email},
{nickname},
{birthday},
'',
CURRENT_TIMESTAMP
)
""").on(
'email -> member.email, 'nickname -> member.nickname, 'birthday -> member.birthday).executeUpdate() match {
case 1 =>
SQL("""SELECT IDENTITY() FROM dual""").singleOpt(scalar[Long])
case _ => None
}
def update(id: Long, member: Member)(implicit c: Connection): Boolean =
SQL("""
UPDATE members
SET
email = {email},
nickname = {nickname},
birthday = {birthday},
updated_at = CURRENT_TIMESTAMP
WHERE
id = {id}
""").on(
'id -> id,
'email -> member.email, 'nickname -> member.nickname, 'birthday -> member.birthday).executeUpdate() match {
case 1 =>
Cache.remove(cacheName(id))
true
case _ => false
}
def updatePw(id: Long, passwd: String)(implicit c: Connection): Boolean =
SQL("""
UPDATE members
SET
passwd = {passwd}
WHERE
id = {id}
""").on(
'id -> id,
'passwd -> passwdHash(passwd)).executeUpdate() match {
case 1 =>
Cache.remove(cacheName(id))
true
case _ => false
}
def delete(id: Long)(implicit c: Connection): Boolean =
SQL("""
DELETE FROM members
WHERE
id = {id}
""").on(
'id -> id).executeUpdate() match {
case 1 =>
Cache.remove(cacheName(id))
true
case _ => false
}
def authenticate(email: String, passwd: String)(implicit c: Connection): Option[Long] =
SQL("""
SELECT id FROM members
WHERE
email = {email}
AND
passwd = {passwd}
""").on(
'email -> email, 'passwd -> passwdHash(passwd)).singleOpt(scalar[Long])
def tryLock(id: Long)(implicit c: Connection): Option[Long] =
SQL("""
SELECT id FROM members
WHERE
id = {id}
FOR UPDATE
""").on(
'id -> id).singleOpt(scalar[Long])
def exists(email: String)(implicit c: Connection): Option[Long] =
SQL("""
SELECT id FROM members
WHERE
email = {email}
""").on(
'email -> email).singleOpt(scalar[Long])
private def cacheName(id: Long): String = "member." + id
private def passwdHash(passwd: String): String = Crypto.sign(passwd)
}
|
agwlvssainokuni/lifelog
|
lifelog-common/app/models/Member.scala
|
Scala
|
apache-2.0
| 4,783 |
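A hypothetical call-site sketch for the Member model above, using Play's DB.withConnection to supply the implicit java.sql.Connection; the object name, e-mail and paging values are invented.

import play.api.Play.current
import play.api.db.DB
import models.Member

object MemberUsageSketch {
  def createAndList(): (Option[Long], Seq[Member]) = DB.withConnection { implicit c =>
    // create returns the generated id (if the insert succeeded); list pages through members
    val newId = Member.create(Member("alice@example.com", "alice", None))
    val page  = Member.list(pageNo = 0, pageSize = 20)
    (newId, page)
  }
}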
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn.mkldnn
import com.intel.analytics.bigdl.mkl.Memory
import com.intel.analytics.bigdl.dllib.nn.MklInt8Convertible
import com.intel.analytics.bigdl.dllib.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.dllib.nn.mkldnn.Phase.InferencePhase
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.T
private[bigdl] object Utils {
def copyMaskAndScales(from: MemoryData, to: MemoryData): Unit = {
// here we check the from and to, they should not be null ideally
    // but if the model is mixed with blas layers, it may be null
if (from != null && to != null && to.scales.isEmpty) {
to.setScales(from.scales.clone())
to.setMask(from.mask)
}
}
def copyMaskAndScales(from: Array[MemoryData], to: Array[MemoryData]): Unit = {
// here we check the from and to, they should not be null ideally
    // but if the model is mixed with blas layers, it may be null
if (from == null || to == null) return
val valid = (from.length == 1 || to.length == 1) || // the ConcatTable or JoinTable
(from.length == to.length) // the same length of from and to.
// if from has scales but to has no, copy them
val needCopy = from.ne(to) && from.forall(_.scales.nonEmpty) && to.forall(_.scales.isEmpty)
if (valid && needCopy) {
if (from.length == to.length) {
to.zip(from).foreach(x => if (x._1.scales.isEmpty) {
x._1.setScales(x._2.scales)
x._1.setMask(x._2.mask)
})
} else if (to.length == 1) {
to.head.setScales(from.map(_.scales).transpose.map(_.max))
require(from.map(_.mask).distinct.length == 1, s"only support the same mask")
to.head.setMask(from.map(_.mask).distinct.head)
} else if (to.length > 1) {
to.foreach(_.setScales(from.head.scales))
to.foreach(_.setMask(from.head.mask))
}
}
}
def getDefaultFormat(memoryData: MemoryData, isInOrOut: Boolean = true): Int = {
memoryData.shape.length match {
case 2 => if (isInOrOut) Memory.Format.nc else Memory.Format.oi
case 4 => if (isInOrOut) Memory.Format.nchw else Memory.Format.oihw
case _ => throw new UnsupportedOperationException("Linear only supports 2-D or 4-D")
}
}
private def denseTensor(format: MemoryData, tensor: Tensor[Float],
isInOrOut: Boolean = true, runtime: MklDnnRuntime): Tensor[Float] = {
val reorder = ReorderMemory(HeapData(format.shape, getDefaultFormat(format, isInOrOut)))
reorder.setRuntime(runtime)
reorder.initFwdPrimitives(Array(format), InferencePhase)
reorder.forward(tensor).toTensor[Float]
}
private def denseActivity(formats: Array[MemoryData], activity: Activity,
isInOrOut: Boolean = true, runtime: MklDnnRuntime): Activity = {
val ret = if (formats.length > 1) { // table
require(formats.length == activity.toTable.length(),
s"formats should be the same as activity")
val table = T()
var i = 1
while (i <= formats.length) {
val format = formats(i - 1)
val tensor = activity.toTable.get[Tensor[Float]](i).get
table(i) = denseTensor(format, tensor, isInOrOut, runtime)
i += 1
}
table
} else { // tensor
denseTensor(formats(0), activity.toTensor[Float], isInOrOut, runtime)
}
ret
}
def getDenseIn(module: MklInt8Convertible, input: Activity): Activity = {
if (module.isInstanceOf[MklDnnModule]) {
val mklDnnLayer = module.asInstanceOf[MklDnnModule]
Utils.denseActivity(mklDnnLayer.inputFormats(), input, true, mklDnnLayer.getRuntime)
} else {
input
}
}
def getDenseOut(module: MklInt8Convertible, output: Activity): Activity = {
if (module.isInstanceOf[MklDnnModule]) {
val mklDnnLayer = module.asInstanceOf[MklDnnModule]
Utils.denseActivity(mklDnnLayer.outputFormats(), output, true, mklDnnLayer.getRuntime)
} else {
output
}
}
private def setConvNegativeInput(module: MklInt8Convertible, input: Activity): Unit = {
if (module.isInstanceOf[SpatialConvolution]) {
val conv = module.asInstanceOf[SpatialConvolution]
val denseIn = getDenseIn(module, input)
val min = denseIn.toTensor[Float].min()
if (min >= 0.0f) {
conv.negativeInput = false
}
}
}
def calcScales(module: AbstractModule[_, _, _], input: Activity): Unit = {
module match {
case mkldnnModule: MklInt8Convertible =>
mkldnnModule.calcScales(input)
Utils.setConvNegativeInput(mkldnnModule, input)
case _ =>
}
}
def getOutput(module: AbstractModule[_, _, _], input: Activity): Activity = {
module match {
case mklDnnModule: MklDnnModule => module.output.asInstanceOf[Activity]
case _ => module.output.asInstanceOf[Activity]
}
}
}
|
intel-analytics/BigDL
|
scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/nn/mkldnn/Utils.scala
|
Scala
|
apache-2.0
| 5,488 |
/*
* Copyright 2016 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.linkedin.drelephant.util
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, InputStream}
import java.net.URI
import org.apache.commons.io.IOUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FSDataInputStream, FileStatus, FileSystem, Path, PathFilter, PositionedReadable}
import org.apache.hadoop.io.compress.CompressionInputStream
import org.apache.log4j.Logger
import org.apache.spark.SparkConf
import org.apache.spark.io.LZ4CompressionCodec
import org.mockito.BDDMockito
import org.scalatest.{FunSpec, OptionValues}
import org.scalatest.mockito.MockitoSugar
import org.xerial.snappy.SnappyOutputStream
class SparkUtilsTest extends FunSpec with org.scalatest.Matchers with OptionValues with MockitoSugar {
describe("SparkUtils") {
describe(".fileSystemAndPathForEventLogDir") {
it("returns a filesystem + path based on uri from fetcherConfg") {
val hadoopConfiguration = new Configuration(false)
val sparkConf = new SparkConf()
val sparkUtils = new SparkUtils {
override lazy val logger = mock[Logger]
override lazy val hadoopUtils = mock[HadoopUtils]
override lazy val defaultEnv = Map.empty[String, String]
}
val (fs, path) = sparkUtils.fileSystemAndPathForEventLogDir(hadoopConfiguration,
sparkConf,
Some("webhdfs://nn1.grid.example.com:50070/logs/spark"))
fs.getUri.toString should be("webhdfs://nn1.grid.example.com:50070")
path should be(new Path("/logs/spark"))
}
it("returns a webhdfs filesystem + path based on spark.eventLog.dir when it is a webhdfs URL") {
val hadoopConfiguration = new Configuration(false)
val sparkConf = new SparkConf().set("spark.eventLog.dir", "webhdfs://nn1.grid.example.com:50070/logs/spark")
val sparkUtils = new SparkUtils {
override lazy val logger = mock[Logger]
override lazy val hadoopUtils = mock[HadoopUtils]
override lazy val defaultEnv = Map.empty[String, String]
}
val (fs, path) = sparkUtils.fileSystemAndPathForEventLogDir(hadoopConfiguration, sparkConf, None)
fs.getUri.toString should be("webhdfs://nn1.grid.example.com:50070")
path should be(new Path("/logs/spark"))
}
it("returns a webhdfs filesystem + path based on spark.eventLog.dir when it is an hdfs URL") {
val hadoopConfiguration = new Configuration(false)
val sparkConf = new SparkConf().set("spark.eventLog.dir", "hdfs://nn1.grid.example.com:9000/logs/spark")
val sparkUtils = new SparkUtils {
override lazy val logger = mock[Logger]
override lazy val hadoopUtils = mock[HadoopUtils]
override lazy val defaultEnv = Map.empty[String, String]
}
val (fs, path) = sparkUtils.fileSystemAndPathForEventLogDir(hadoopConfiguration, sparkConf, None)
fs.getUri.toString should be("webhdfs://nn1.grid.example.com:50070")
path should be(new Path("/logs/spark"))
}
it("returns a webhdfs filesystem + path based on dfs.nameservices and spark.eventLog.dir when the latter is a path and the dfs.nameservices is configured and available") {
val hadoopConfiguration = new Configuration(false)
hadoopConfiguration.set("dfs.nameservices", "sample")
hadoopConfiguration.set("dfs.ha.namenodes.sample", "ha1,ha2")
hadoopConfiguration.set("dfs.namenode.http-address.sample.ha1", "sample-ha1.grid.example.com:50070")
hadoopConfiguration.set("dfs.namenode.http-address.sample.ha2", "sample-ha2.grid.example.com:50070")
val sparkConf = new SparkConf().set("spark.eventLog.dir", "/logs/spark")
val sparkUtils = new SparkUtils {
override lazy val logger = mock[Logger]
override lazy val hadoopUtils = HadoopUtilsTest.newFakeHadoopUtilsForNameNode(
("sample-ha1.grid.example.com", ("sample-ha1.grid.example.com", "standby")),
("sample-ha2.grid.example.com", ("sample-ha2.grid.example.com", "active"))
)
override lazy val defaultEnv = Map.empty[String, String]
}
val (fs, path) = sparkUtils.fileSystemAndPathForEventLogDir(hadoopConfiguration, sparkConf, None)
fs.getUri.toString should be("webhdfs://sample-ha2.grid.example.com:50070")
path should be(new Path("/logs/spark"))
}
it("returns a webhdfs filesystem + path based on dfs.nameservices and spark.eventLog.dir when the latter is a path and the dfs.nameservices is configured but unavailable") {
val hadoopConfiguration = new Configuration(false)
hadoopConfiguration.set("dfs.nameservices", "sample")
hadoopConfiguration.set("dfs.ha.namenodes.sample", "ha1,ha2")
hadoopConfiguration.set("dfs.namenode.http-address.sample.ha1", "sample-ha1.grid.example.com:50070")
hadoopConfiguration.set("dfs.namenode.http-address.sample.ha2", "sample-ha2.grid.example.com:50070")
hadoopConfiguration.set("dfs.namenode.http-address", "sample.grid.example.com:50070")
val sparkConf = new SparkConf().set("spark.eventLog.dir", "/logs/spark")
val sparkUtils = new SparkUtils {
override lazy val logger = mock[Logger]
override lazy val hadoopUtils = HadoopUtilsTest.newFakeHadoopUtilsForNameNode(
("sample-ha1.grid.example.com", ("sample-ha1.grid.example.com", "standby")),
("sample-ha2.grid.example.com", ("sample-ha2.grid.example.com", "standby"))
)
override lazy val defaultEnv = Map.empty[String, String]
}
val (fs, path) = sparkUtils.fileSystemAndPathForEventLogDir(hadoopConfiguration, sparkConf, None)
fs.getUri.toString should be("webhdfs://sample.grid.example.com:50070")
path should be(new Path("/logs/spark"))
}
it("returns a webhdfs filesystem + path based on dfs.namenode.http-address and spark.eventLog.dir when the latter is a path and dfs.nameservices is not configured") {
val hadoopConfiguration = new Configuration(false)
hadoopConfiguration.set("dfs.namenode.http-address", "sample.grid.example.com:50070")
val sparkConf = new SparkConf().set("spark.eventLog.dir", "/logs/spark")
val sparkUtils = new SparkUtils {
override lazy val logger = mock[Logger]
override lazy val hadoopUtils = HadoopUtilsTest.newFakeHadoopUtilsForNameNode(
("sample-ha1.grid.example.com", ("sample-ha1.grid.example.com", "standby")),
("sample-ha2.grid.example.com", ("sample-ha2.grid.example.com", "active"))
)
override lazy val defaultEnv = Map.empty[String, String]
}
val (fs, path) = sparkUtils.fileSystemAndPathForEventLogDir(hadoopConfiguration, sparkConf, None)
fs.getUri.toString should be("webhdfs://sample.grid.example.com:50070")
path should be(new Path("/logs/spark"))
}
it("throws an exception when spark.eventLog.dir is a path and no namenode is configured at all") {
val hadoopConfiguration = new Configuration(false)
val sparkConf = new SparkConf().set("spark.eventLog.dir", "/logs/spark")
val sparkUtils = new SparkUtils {
override lazy val logger = mock[Logger]
override lazy val hadoopUtils = mock[HadoopUtils]
override lazy val defaultEnv = Map.empty[String, String]
}
an[Exception] should be thrownBy { sparkUtils.fileSystemAndPathForEventLogDir(hadoopConfiguration, sparkConf, None) }
}
}
describe(".pathAndCodecforEventLog") {
it("returns the path and codec for the event log, given the base path and app/attempt information") {
val hadoopConfiguration = new Configuration(false)
val sparkConf =
new SparkConf()
.set("spark.eventLog.dir", "/logs/spark")
.set("spark.eventLog.compress", "true")
val sparkUtils = SparkUtilsTest.newFakeSparkUtilsForEventLog(
new URI("webhdfs://nn1.grid.example.com:50070"),
new Path("/logs/spark"),
new Path("application_1_1.lz4"),
Array.empty[Byte]
)
val (fs, basePath) = sparkUtils.fileSystemAndPathForEventLogDir(hadoopConfiguration, sparkConf, None)
val (path, codec) =
      sparkUtils.pathAndCodecforEventLog(sparkConf, fs, basePath, "application_1", Some("1"))
path should be(new Path("webhdfs://nn1.grid.example.com:50070/logs/spark/application_1_1.lz4"))
codec.value should be(a[LZ4CompressionCodec])
}
it("returns the path and codec for the event log, given the base path and appid. Extracts attempt and codec from path") {
val hadoopConfiguration = new Configuration(false)
val sparkConf =
new SparkConf()
.set("spark.eventLog.dir", "/logs/spark")
.set("spark.eventLog.compress", "true")
val sparkUtils = SparkUtilsTest.newFakeSparkUtilsForEventLog(
new URI("webhdfs://nn1.grid.example.com:50070"),
new Path("/logs/spark"),
new Path("application_1_1.lz4"),
Array.empty[Byte]
)
val (fs, basePath) = sparkUtils.fileSystemAndPathForEventLogDir(hadoopConfiguration, sparkConf, None)
val (path, codec) =
      sparkUtils.pathAndCodecforEventLog(sparkConf, fs, basePath, "application_1", None)
path should be(new Path("webhdfs://nn1.grid.example.com:50070/logs/spark/application_1_1.lz4"))
codec.value should be(a[LZ4CompressionCodec])
}
}
describe(".withEventLog") {
it("loans the input stream for the event log") {
val expectedLog =
"""{"Event":"SparkListenerApplicationStart","App Name":"app","App ID":"application_1","Timestamp":1,"User":"foo"}"""
val eventLogBytes = {
val bout = new ByteArrayOutputStream()
for {
in <- resource.managed(new ByteArrayInputStream(expectedLog.getBytes("UTF-8")))
out <- resource.managed(new SnappyOutputStream(bout))
} {
IOUtils.copy(in, out)
}
bout.toByteArray
}
val hadoopConfiguration = new Configuration(false)
val sparkConf =
new SparkConf()
.set("spark.eventLog.dir", "/logs/spark")
.set("spark.eventLog.compress", "true")
val sparkUtils = SparkUtilsTest.newFakeSparkUtilsForEventLog(
new URI("webhdfs://nn1.grid.example.com:50070"),
new Path("/logs/spark"),
new Path("application_1_1.snappy"),
eventLogBytes
)
val (fs, basePath) = sparkUtils.fileSystemAndPathForEventLogDir(hadoopConfiguration, sparkConf, None)
val (path, codec) =
      sparkUtils.pathAndCodecforEventLog(sparkConf, fs, basePath, "application_1", None)
sparkUtils.withEventLog(fs, path, codec) { in =>
val bout = new ByteArrayOutputStream()
IOUtils.copy(in, bout)
val actualLog = new String(bout.toByteArray, "UTF-8")
actualLog should be(expectedLog)
}
}
}
}
}
object SparkUtilsTest extends MockitoSugar {
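  // Test helper: builds a SparkUtils whose event-log directory resolution is stubbed out with a
  // mock FileSystem that serves `filename` under `basePath` on `fileSystemUri`, backed by `bytes`.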
def newFakeSparkUtilsForEventLog(
fileSystemUri: URI,
basePath: Path,
filename: Path,
bytes: Array[Byte]
): SparkUtils = new SparkUtils() {
override lazy val logger = mock[Logger]
override lazy val hadoopUtils = mock[HadoopUtils]
override lazy val defaultEnv = Map.empty[String, String]
override def fileSystemAndPathForEventLogDir(
hadoopConfiguration: Configuration,
sparkConf: SparkConf,
uriFromFetcherConf: Option[String]
): (FileSystem, Path) = {
val fs = mock[FileSystem]
val expectedPath = new Path(new Path(fileSystemUri), new Path(basePath, filename))
val expectedFileStatus = {
val fileStatus = mock[FileStatus]
BDDMockito.given(fileStatus.getLen).willReturn(bytes.length.toLong)
BDDMockito.given(fileStatus.getPath()).willReturn(expectedPath)
fileStatus
}
val expectedStatusArray = Array(expectedFileStatus)
val filter = new PathFilter() {
override def accept(file: Path): Boolean = {
file.getName().startsWith("mockAppId");
}
}
BDDMockito.given(fs.getUri).willReturn(fileSystemUri)
BDDMockito.given(fs.exists(expectedPath)).willReturn(true)
BDDMockito.given(fs.getFileStatus(expectedPath)).willReturn(expectedFileStatus)
BDDMockito.given(fs.listStatus(org.mockito.Matchers.refEq(new Path( new Path(fileSystemUri), basePath)),
org.mockito.Matchers.any(filter.getClass))).
willReturn(expectedStatusArray)
BDDMockito.given(fs.open(expectedPath)).willReturn(
new FSDataInputStream(new FakeCompressionInputStream(new ByteArrayInputStream(bytes)))
)
(fs, basePath)
}
}
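  // Pass-through CompressionInputStream used only to satisfy the codec interface in tests;
  // positioned reads and resetState are left unimplemented because the tests read sequentially.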
class FakeCompressionInputStream(in: InputStream) extends CompressionInputStream(in) with PositionedReadable {
override def read(): Int = in.read()
override def read(b: Array[Byte], off: Int, len: Int): Int = in.read(b, off, len)
override def read(pos: Long, buffer: Array[Byte], off: Int, len: Int): Int = ???
override def readFully(pos: Long, buffer: Array[Byte], off: Int, len: Int): Unit = ???
override def readFully(pos: Long, buffer: Array[Byte]): Unit = ???
override def resetState(): Unit = ???
}
}
|
linkedin/dr-elephant
|
test/com/linkedin/drelephant/util/SparkUtilsTest.scala
|
Scala
|
apache-2.0
| 14,141 |
package com.nthportal.shell
package util
import com.nthportal.shell.impl.TestCommand
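// Checks that a CommandTabCompleter backed by a single test command yields no suggestions for an
// empty argument list.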
class CommandTabCompleterTest extends SimpleSpec {
behavior of classOf[CommandTabCompleter].getSimpleName
it should "return an empty seq when given an empty list of arguments" in {
val ctc = new CommandTabCompleter {
override protected def commands = List(TestCommand())
}
ctc.tabComplete(Nil) shouldBe empty
}
}
|
NthPortal/app-shell
|
src/test/scala/com/nthportal/shell/util/CommandTabCompleterTest.scala
|
Scala
|
apache-2.0
| 426 |
package com.github.suzuki0keiichi.nomorescript.trees
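/**
 * Emits a JavaScript assignment `<target>.<name> = <rhs>`, stripping the synthetic
 * `nomorescriptroot.<package>.` prefix from the left-hand side when present.
 */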
case class NoMoreScriptSetter(name: String, target: NoMoreScriptTree, val rhs: NoMoreScriptTree) extends NoMoreScriptTree {
val rootMatcher = "nomorescriptroot\\\\.[a-zA-Z0-9_]+\\\\.([a-zA-Z0-9_.]+)$".r
override def toJs(terminate: Boolean) = {
val lhsJs = (target.toJs(false).mkString(" ") + "." + name) match {
case rootMatcher(name) => name
case name => name
}
val rhsJs = rhs.toJs(false)
    val js = Util.addFirst(rhsJs, lhsJs + " = ")
if (terminate) {
Util.addLast(js, ";")
} else {
js
}
}
}
|
suzuki0keiichi/nomorescript
|
nomorescript-plugin/src/main/scala/com/github/suzuki0keiichi/nomorescript/trees/NoMoreScriptSetter.scala
|
Scala
|
mit
| 763 |
package group.matsen.phylohmc
import spire.algebra.Order._
import spire.algebra.{Field, Signed}
import spire.std.seq._
import spire.syntax.order._
import spire.syntax.vectorSpace._
trait ReflectiveLeapProg[R, N, D <: Int with Singleton] extends NumericalDynamics[R, N, D] {
val barrier = Field[R].fromDouble(0.0)
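  // One reflective leapfrog step: half-step momentum update, full-step branch-length update,
  // reflection of external branch lengths at `barrier` (flipping the corresponding momenta),
  // a randomly chosen NNI move for internal branches whose length crosses zero within the step,
  // and a final half-step momentum update.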
def leapprog(eps: R)(z: ZZ): ZZ = {
val halfEps = eps / 2
val pp = z.p - halfEps *: z.dU
val Kp = K(pp)
val qp = z.q.modifyLengths(l => l + eps *: Kp._2)
val qpp = qp.modifyLengths(l => l.zipWithIndex.map(Function.tupled((li, i) => if (qp.isInternal(i)) Signed[R].abs(li) else Signed[R].abs(li - barrier) + barrier)))
val ppp = (qp.lengths, pp, pp.indices).zipped.map((qi, pi, i) => if (qp.isInternal(i)) pi else if (qi < barrier) -pi else pi)
val zp = (z.q.lengths, Kp._2).zipped.map(- _ / _).zipWithIndex.filter(Function.tupled((_, i) => z.q.isInternal(i))).filter(Function.tupled((e, _) => Field[R].zero <= e && e <= eps)).sortBy(_._1).view.map(_._2).foldLeft(z.copy(q = qpp, p = ppp)(U(qpp), K(ppp))) { (z, i) =>
val q = (if (z.q.isInternal(i)) rng.nextInt(3) else 0) match {
case 0 => z.q
case 1 => z.q.nni(i, false)
case 2 => z.q.nni(i, true)
}
val p = z.p.updated(i, -z.p(i))
z.copy(q, p)(U(q), K(p))
}
val pppp = zp.p - halfEps *: zp.dU
zp.copy(p = pppp)(_K = K(pppp))
}
}
|
armanbilge/phyloHMC
|
src/main/scala/group/matsen/phylohmc/ReflectiveLeapProg.scala
|
Scala
|
agpl-3.0
| 1,393 |
import scala.reflect.runtime.universe._
import scala.tools.reflect.Eval
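// Reifies a small Complex-number class together with a sample computation, then evaluates the
// resulting tree at runtime through the toolbox-backed Eval.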
object Test extends dotty.runtime.LegacyApp {
reify {
class Complex(val re: Double, val im: Double) {
def + (that: Complex) =
new Complex(re + that.re, im + that.im)
def - (that: Complex) =
new Complex(re - that.re, im - that.im)
def * (that: Complex) =
new Complex(re * that.re - im * that.im,
re * that.im + im * that.re)
def / (that: Complex) = {
val denom = that.re * that.re + that.im * that.im
new Complex((re * that.re + im * that.im) / denom,
(im * that.re - re * that.im) / denom)
}
override def toString =
re + (if (im < 0) "-" + (-im) else "+" + im) + "*i"
}
val x = new Complex(2, 1); val y = new Complex(1, 3)
println(x + y)
}.eval
}
|
yusuke2255/dotty
|
tests/disabled/macro/run/reify_complex.scala
|
Scala
|
bsd-3-clause
| 864 |
package org.jetbrains.plugins.scala.lang.psi.stubs.elements.signatures
import com.intellij.lang.ASTNode
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScParameter
import org.jetbrains.plugins.scala.lang.psi.impl.statements.params.ScParameterImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.ScParameterStub
/**
* User: Alexander Podkhalyuzin
* Date: 19.10.2008
*/
class ScParameterElementType extends ScParamElementType[ScParameter]("parameter") {
override def createElement(node: ASTNode): ScParameter = new ScParameterImpl(node)
override def createPsi(stub: ScParameterStub): ScParameter = new ScParameterImpl(stub)
}
|
gtache/intellij-lsp
|
intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/psi/stubs/elements/signatures/ScParameterElementType.scala
|
Scala
|
apache-2.0
| 654 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.util.concurrent.TimeUnit.NANOSECONDS
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, Expression, GenericInternalRow, JoinedRow, Literal, UnsafeProjection, UnsafeRow}
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical.EventTimeWatermark._
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution.{BinaryExecNode, SparkPlan}
import org.apache.spark.sql.execution.streaming.StreamingSymmetricHashJoinHelper._
import org.apache.spark.sql.execution.streaming.state._
import org.apache.spark.sql.execution.streaming.state.SymmetricHashJoinStateManager.KeyToValuePair
import org.apache.spark.sql.internal.SessionState
import org.apache.spark.util.{CompletionIterator, SerializableConfiguration}
/**
* Performs stream-stream join using symmetric hash join algorithm. It works as follows.
*
* /-----------------------\\
* left side input --------->| left side state |------\\
* \\-----------------------/ |
* |--------> joined output
* /-----------------------\\ |
* right side input -------->| right side state |------/
* \\-----------------------/
*
* Each join side buffers past input rows as streaming state so that the past input can be joined
* with future input on the other side. This buffer state is effectively a multi-map:
* equi-join key -> list of past input rows received with the join key
*
* For each input row in each side, the following operations take place.
* - Calculate join key from the row.
* - Use the join key to append the row to the buffer state of the side that the row came from.
* - Find past buffered values for the key from the other side. For each such value, emit the
* "joined row" (left-row, right-row)
* - Apply the optional condition to filter the joined rows as the final output.
*
* If a timestamp column with event time watermark is present in the join keys or in the input
 * data, then it uses the watermark to figure out which rows in the buffer will not join with
 * the new data, and therefore can be discarded. Depending on the provided query conditions, we
* can define thresholds on both state key (i.e. joining keys) and state value (i.e. input rows).
* There are three kinds of queries possible regarding this as explained below.
* Assume that watermark has been defined on both `leftTime` and `rightTime` columns used below.
*
* 1. When timestamp/time-window + watermark is in the join keys. Example (pseudo-SQL):
*
* SELECT * FROM leftTable, rightTable
* ON
* leftKey = rightKey AND
* window(leftTime, "1 hour") = window(rightTime, "1 hour") // 1hr tumbling windows
*
* In this case, this operator will join rows newer than watermark which fall in the same
* 1 hour window. Say the event-time watermark is "12:34" (both left and right input).
* Then input rows can only have time > 12:34. Hence, they can only join with buffered rows
* where window >= 12:00 - 1:00 and all buffered rows with join window < 12:00 can be
* discarded. In other words, the operator will discard all state where
* window in state key (i.e. join key) < event time watermark. This threshold is called
* State Key Watermark.
*
* 2. When timestamp range conditions are provided (no time/window + watermark in join keys). E.g.
*
* SELECT * FROM leftTable, rightTable
* ON
* leftKey = rightKey AND
* leftTime > rightTime - INTERVAL 8 MINUTES AND leftTime < rightTime + INTERVAL 1 HOUR
*
* In this case, the event-time watermark and the BETWEEN condition can be used to calculate a
* state watermark, i.e., time threshold for the state rows that can be discarded.
* For example, say each join side has a time column, named "leftTime" and
* "rightTime", and there is a join condition "leftTime > rightTime - 8 min".
* While processing, say the watermark on right input is "12:34". This means that from henceforth,
* only right inputs rows with "rightTime > 12:34" will be processed, and any older rows will be
* considered as "too late" and therefore dropped. Then, the left side buffer only needs
* to keep rows where "leftTime > rightTime - 8 min > 12:34 - 8m > 12:26".
* That is, the left state watermark is 12:26, and any rows older than that can be dropped from
* the state. In other words, the operator will discard all state where
* timestamp in state value (input rows) < state watermark. This threshold is called
* State Value Watermark (to distinguish from the state key watermark).
*
* Note:
* - The event watermark value of one side is used to calculate the
* state watermark of the other side. That is, a condition ~ "leftTime > rightTime + X" with
* right side event watermark is used to calculate the left side state watermark. Conversely,
 *     a condition ~ "leftTime < rightTime + Y" with left side event watermark is used to calculate
* right side state watermark.
* - Depending on the conditions, the state watermark maybe different for the left and right
* side. In the above example, leftTime > 12:26 AND rightTime > 12:34 - 1 hour = 11:34.
* - State can be dropped from BOTH sides only when there are conditions of the above forms that
* define time bounds on timestamp in both directions.
*
* 3. When both window in join key and time range conditions are present, case 1 + 2.
* In this case, since window equality is a stricter condition than the time range, we can
 *    use the State Key Watermark = event time watermark to discard state (similar to case 1).
*
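 * For illustration only (not part of this operator's contract), case 2 above roughly corresponds
 * to the following DataFrame query; the stream, column, and key names are hypothetical:
 *
 * {{{
 *   import org.apache.spark.sql.functions.expr
 *
 *   val joined = leftStream.withWatermark("leftTime", "1 hour")
 *     .join(
 *       rightStream.withWatermark("rightTime", "1 hour"),
 *       expr("leftKey = rightKey AND " +
 *            "leftTime > rightTime - INTERVAL 8 MINUTES AND " +
 *            "leftTime < rightTime + INTERVAL 1 HOUR"),
 *       "leftOuter")
 * }}}
 *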
* @param leftKeys Expression to generate key rows for joining from left input
* @param rightKeys Expression to generate key rows for joining from right input
* @param joinType Type of join (inner, left outer, etc.)
* @param condition Conditions to filter rows, split by left, right, and joined. See
* [[JoinConditionSplitPredicates]]
* @param stateInfo Version information required to read join state (buffered rows)
* @param eventTimeWatermark Watermark of input event, same for both sides
* @param stateWatermarkPredicates Predicates for removal of state, see
* [[JoinStateWatermarkPredicates]]
* @param left Left child plan
* @param right Right child plan
*/
case class StreamingSymmetricHashJoinExec(
leftKeys: Seq[Expression],
rightKeys: Seq[Expression],
joinType: JoinType,
condition: JoinConditionSplitPredicates,
stateInfo: Option[StatefulOperatorStateInfo],
eventTimeWatermark: Option[Long],
stateWatermarkPredicates: JoinStateWatermarkPredicates,
stateFormatVersion: Int,
left: SparkPlan,
right: SparkPlan) extends SparkPlan with BinaryExecNode with StateStoreWriter {
def this(
leftKeys: Seq[Expression],
rightKeys: Seq[Expression],
joinType: JoinType,
condition: Option[Expression],
stateFormatVersion: Int,
left: SparkPlan,
right: SparkPlan) = {
this(
leftKeys, rightKeys, joinType, JoinConditionSplitPredicates(condition, left, right),
stateInfo = None, eventTimeWatermark = None,
stateWatermarkPredicates = JoinStateWatermarkPredicates(), stateFormatVersion, left, right)
}
if (stateFormatVersion < 2 && joinType != Inner) {
throw new IllegalArgumentException("The query is using stream-stream outer join with state" +
s" format version ${stateFormatVersion} - correctness issue is discovered. Please discard" +
" the checkpoint and rerun the query. See SPARK-26154 for more details.")
}
private def throwBadJoinTypeException(): Nothing = {
throw new IllegalArgumentException(
s"${getClass.getSimpleName} should not take $joinType as the JoinType")
}
require(
joinType == Inner || joinType == LeftOuter || joinType == RightOuter,
s"${getClass.getSimpleName} should not take $joinType as the JoinType")
require(leftKeys.map(_.dataType) == rightKeys.map(_.dataType))
private val storeConf = new StateStoreConf(sqlContext.conf)
private val hadoopConfBcast = sparkContext.broadcast(
new SerializableConfiguration(SessionState.newHadoopConf(
sparkContext.hadoopConfiguration, sqlContext.conf)))
val nullLeft = new GenericInternalRow(left.output.map(_.withNullability(true)).length)
val nullRight = new GenericInternalRow(right.output.map(_.withNullability(true)).length)
override def requiredChildDistribution: Seq[Distribution] =
HashClusteredDistribution(leftKeys, stateInfo.map(_.numPartitions)) ::
HashClusteredDistribution(rightKeys, stateInfo.map(_.numPartitions)) :: Nil
override def output: Seq[Attribute] = joinType match {
case _: InnerLike => left.output ++ right.output
case LeftOuter => left.output ++ right.output.map(_.withNullability(true))
case RightOuter => left.output.map(_.withNullability(true)) ++ right.output
case _ => throwBadJoinTypeException()
}
override def outputPartitioning: Partitioning = joinType match {
case _: InnerLike =>
PartitioningCollection(Seq(left.outputPartitioning, right.outputPartitioning))
case LeftOuter => PartitioningCollection(Seq(left.outputPartitioning))
case RightOuter => PartitioningCollection(Seq(right.outputPartitioning))
case x =>
throw new IllegalArgumentException(
s"${getClass.getSimpleName} should not take $x as the JoinType")
}
override def shouldRunAnotherBatch(newMetadata: OffsetSeqMetadata): Boolean = {
val watermarkUsedForStateCleanup =
stateWatermarkPredicates.left.nonEmpty || stateWatermarkPredicates.right.nonEmpty
// Latest watermark value is more than that used in this previous executed plan
val watermarkHasChanged =
eventTimeWatermark.isDefined && newMetadata.batchWatermarkMs > eventTimeWatermark.get
watermarkUsedForStateCleanup && watermarkHasChanged
}
protected override def doExecute(): RDD[InternalRow] = {
val stateStoreCoord = sqlContext.sessionState.streamingQueryManager.stateStoreCoordinator
val stateStoreNames = SymmetricHashJoinStateManager.allStateStoreNames(LeftSide, RightSide)
left.execute().stateStoreAwareZipPartitions(
right.execute(), stateInfo.get, stateStoreNames, stateStoreCoord)(processPartitions)
}
private def processPartitions(
leftInputIter: Iterator[InternalRow],
rightInputIter: Iterator[InternalRow]): Iterator[InternalRow] = {
if (stateInfo.isEmpty) {
throw new IllegalStateException(s"Cannot execute join as state info was not specified\\n$this")
}
val numOutputRows = longMetric("numOutputRows")
val numUpdatedStateRows = longMetric("numUpdatedStateRows")
val numTotalStateRows = longMetric("numTotalStateRows")
val allUpdatesTimeMs = longMetric("allUpdatesTimeMs")
val allRemovalsTimeMs = longMetric("allRemovalsTimeMs")
val commitTimeMs = longMetric("commitTimeMs")
val stateMemory = longMetric("stateMemory")
val updateStartTimeNs = System.nanoTime
val joinedRow = new JoinedRow
val postJoinFilter =
newPredicate(condition.bothSides.getOrElse(Literal(true)), left.output ++ right.output).eval _
val leftSideJoiner = new OneSideHashJoiner(
LeftSide, left.output, leftKeys, leftInputIter,
condition.leftSideOnly, postJoinFilter, stateWatermarkPredicates.left)
val rightSideJoiner = new OneSideHashJoiner(
RightSide, right.output, rightKeys, rightInputIter,
condition.rightSideOnly, postJoinFilter, stateWatermarkPredicates.right)
// Join one side input using the other side's buffered/state rows. Here is how it is done.
//
// - `leftJoiner.joinWith(rightJoiner)` generates all rows from matching new left input with
// stored right input, and also stores all the left input
//
// - `rightJoiner.joinWith(leftJoiner)` generates all rows from matching new right input with
// stored left input, and also stores all the right input. It also generates all rows from
// matching new left input with new right input, since the new left input has become stored
// by that point. This tiny asymmetry is necessary to avoid duplication.
val leftOutputIter = leftSideJoiner.storeAndJoinWithOtherSide(rightSideJoiner) {
(input: InternalRow, matched: InternalRow) => joinedRow.withLeft(input).withRight(matched)
}
val rightOutputIter = rightSideJoiner.storeAndJoinWithOtherSide(leftSideJoiner) {
(input: InternalRow, matched: InternalRow) => joinedRow.withLeft(matched).withRight(input)
}
// We need to save the time that the inner join output iterator completes, since outer join
// output counts as both update and removal time.
var innerOutputCompletionTimeNs: Long = 0
def onInnerOutputCompletion = {
innerOutputCompletionTimeNs = System.nanoTime
}
// This is the iterator which produces the inner join rows. For outer joins, this will be
// prepended to a second iterator producing outer join rows; for inner joins, this is the full
// output.
val innerOutputIter = CompletionIterator[InternalRow, Iterator[InternalRow]](
(leftOutputIter ++ rightOutputIter), onInnerOutputCompletion)
val outputIter: Iterator[InternalRow] = joinType match {
case Inner =>
innerOutputIter
case LeftOuter =>
// We generate the outer join input by:
// * Getting an iterator over the rows that have aged out on the left side. These rows are
// candidates for being null joined. Note that to avoid doing two passes, this iterator
// removes the rows from the state manager as they're processed.
// * (state format version 1) Checking whether the current row matches a key in the
// right side state, and that key has any value which satisfies the filter function when
// joined. If it doesn't, we know we can join with null, since there was never
// (including this batch) a match within the watermark period. If it does, there must have
// been a match at some point, so we know we can't join with null.
        //   * (state format version 2) An edge case of the above approach causes a correctness
        //   issue, so another approach was taken (see SPARK-26154); Spark now stores a 'matched'
        //   flag along with each row, which is set to true when there is any matching row on the right.
def matchesWithRightSideState(leftKeyValue: UnsafeRowPair) = {
rightSideJoiner.get(leftKeyValue.key).exists { rightValue =>
postJoinFilter(joinedRow.withLeft(leftKeyValue.value).withRight(rightValue))
}
}
val removedRowIter = leftSideJoiner.removeOldState()
val outerOutputIter = removedRowIter.filterNot { kv =>
stateFormatVersion match {
case 1 => matchesWithRightSideState(new UnsafeRowPair(kv.key, kv.value))
case 2 => kv.matched
case _ =>
throw new IllegalStateException("Unexpected state format version! " +
s"version $stateFormatVersion")
}
}.map(pair => joinedRow.withLeft(pair.value).withRight(nullRight))
innerOutputIter ++ outerOutputIter
case RightOuter =>
// See comments for left outer case.
def matchesWithLeftSideState(rightKeyValue: UnsafeRowPair) = {
leftSideJoiner.get(rightKeyValue.key).exists { leftValue =>
postJoinFilter(joinedRow.withLeft(leftValue).withRight(rightKeyValue.value))
}
}
val removedRowIter = rightSideJoiner.removeOldState()
val outerOutputIter = removedRowIter.filterNot { kv =>
stateFormatVersion match {
case 1 => matchesWithLeftSideState(new UnsafeRowPair(kv.key, kv.value))
case 2 => kv.matched
case _ =>
throw new IllegalStateException("Unexpected state format version! " +
s"version $stateFormatVersion")
}
}.map(pair => joinedRow.withLeft(nullLeft).withRight(pair.value))
innerOutputIter ++ outerOutputIter
case _ => throwBadJoinTypeException()
}
val outputProjection = UnsafeProjection.create(left.output ++ right.output, output)
val outputIterWithMetrics = outputIter.map { row =>
numOutputRows += 1
outputProjection(row)
}
// Function to remove old state after all the input has been consumed and output generated
def onOutputCompletion = {
// All processing time counts as update time.
allUpdatesTimeMs += math.max(NANOSECONDS.toMillis(System.nanoTime - updateStartTimeNs), 0)
// Processing time between inner output completion and here comes from the outer portion of a
// join, and thus counts as removal time as we remove old state from one side while iterating.
if (innerOutputCompletionTimeNs != 0) {
allRemovalsTimeMs +=
math.max(NANOSECONDS.toMillis(System.nanoTime - innerOutputCompletionTimeNs), 0)
}
allRemovalsTimeMs += timeTakenMs {
// Remove any remaining state rows which aren't needed because they're below the watermark.
//
// For inner joins, we have to remove unnecessary state rows from both sides if possible.
// For outer joins, we have already removed unnecessary state rows from the outer side
// (e.g., left side for left outer join) while generating the outer "null" outputs. Now, we
// have to remove unnecessary state rows from the other side (e.g., right side for the left
// outer join) if possible. In all cases, nothing needs to be outputted, hence the removal
// needs to be done greedily by immediately consuming the returned iterator.
val cleanupIter = joinType match {
case Inner => leftSideJoiner.removeOldState() ++ rightSideJoiner.removeOldState()
case LeftOuter => rightSideJoiner.removeOldState()
case RightOuter => leftSideJoiner.removeOldState()
case _ => throwBadJoinTypeException()
}
while (cleanupIter.hasNext) {
cleanupIter.next()
}
}
// Commit all state changes and update state store metrics
commitTimeMs += timeTakenMs {
val leftSideMetrics = leftSideJoiner.commitStateAndGetMetrics()
val rightSideMetrics = rightSideJoiner.commitStateAndGetMetrics()
val combinedMetrics = StateStoreMetrics.combine(Seq(leftSideMetrics, rightSideMetrics))
// Update SQL metrics
numUpdatedStateRows +=
(leftSideJoiner.numUpdatedStateRows + rightSideJoiner.numUpdatedStateRows)
numTotalStateRows += combinedMetrics.numKeys
stateMemory += combinedMetrics.memoryUsedBytes
combinedMetrics.customMetrics.foreach { case (metric, value) =>
longMetric(metric.name) += value
}
}
}
CompletionIterator[InternalRow, Iterator[InternalRow]](
outputIterWithMetrics, onOutputCompletion)
}
/**
   * Internal helper class to consume input rows, generate join output rows using the other side's
   * buffered state rows, and finally clean up this side's buffered state rows
*
* @param joinSide The JoinSide - either left or right.
* @param inputAttributes The input attributes for this side of the join.
* @param joinKeys The join keys.
* @param inputIter The iterator of input rows on this side to be joined.
* @param preJoinFilterExpr A filter over rows on this side. This filter rejects rows that could
* never pass the overall join condition no matter what other side row
* they're joined with.
* @param postJoinFilter A filter over joined rows. This filter completes the application of
* the overall join condition, assuming that preJoinFilter on both sides
* of the join has already been passed.
* Passed as a function rather than expression to avoid creating the
* predicate twice; we also need this filter later on in the parent exec.
* @param stateWatermarkPredicate The state watermark predicate. See
* [[StreamingSymmetricHashJoinExec]] for further description of
* state watermarks.
*/
private class OneSideHashJoiner(
joinSide: JoinSide,
inputAttributes: Seq[Attribute],
joinKeys: Seq[Expression],
inputIter: Iterator[InternalRow],
preJoinFilterExpr: Option[Expression],
postJoinFilter: (InternalRow) => Boolean,
stateWatermarkPredicate: Option[JoinStateWatermarkPredicate]) {
// Filter the joined rows based on the given condition.
val preJoinFilter =
newPredicate(preJoinFilterExpr.getOrElse(Literal(true)), inputAttributes).eval _
private val joinStateManager = new SymmetricHashJoinStateManager(
joinSide, inputAttributes, joinKeys, stateInfo, storeConf, hadoopConfBcast.value.value,
stateFormatVersion)
private[this] val keyGenerator = UnsafeProjection.create(joinKeys, inputAttributes)
private[this] val stateKeyWatermarkPredicateFunc = stateWatermarkPredicate match {
case Some(JoinStateKeyWatermarkPredicate(expr)) =>
// inputSchema can be empty as expr should only have BoundReferences and does not require
        // the schema to generate the predicate. See [[StreamingSymmetricHashJoinHelper]].
newPredicate(expr, Seq.empty).eval _
case _ =>
newPredicate(Literal(false), Seq.empty).eval _ // false = do not remove if no predicate
}
private[this] val stateValueWatermarkPredicateFunc = stateWatermarkPredicate match {
case Some(JoinStateValueWatermarkPredicate(expr)) =>
newPredicate(expr, inputAttributes).eval _
case _ =>
newPredicate(Literal(false), Seq.empty).eval _ // false = do not remove if no predicate
}
private[this] var updatedStateRowsCount = 0
/**
* Generate joined rows by consuming input from this side, and matching it with the buffered
* rows (i.e. state) of the other side.
* @param otherSideJoiner Joiner of the other side
* @param generateJoinedRow Function to generate the joined row from the
* input row from this side and the matched row from the other side
*/
def storeAndJoinWithOtherSide(
otherSideJoiner: OneSideHashJoiner)(
generateJoinedRow: (InternalRow, InternalRow) => JoinedRow):
Iterator[InternalRow] = {
val watermarkAttribute = inputAttributes.find(_.metadata.contains(delayKey))
val nonLateRows =
WatermarkSupport.watermarkExpression(watermarkAttribute, eventTimeWatermark) match {
case Some(watermarkExpr) =>
val predicate = newPredicate(watermarkExpr, inputAttributes)
inputIter.filter { row => !predicate.eval(row) }
case None =>
inputIter
}
nonLateRows.flatMap { row =>
val thisRow = row.asInstanceOf[UnsafeRow]
// If this row fails the pre join filter, that means it can never satisfy the full join
// condition no matter what other side row it's matched with. This allows us to avoid
// adding it to the state, and generate an outer join row immediately (or do nothing in
// the case of inner join).
if (preJoinFilter(thisRow)) {
val key = keyGenerator(thisRow)
val outputIter: Iterator[JoinedRow] = otherSideJoiner.joinStateManager
.getJoinedRows(key, thatRow => generateJoinedRow(thisRow, thatRow), postJoinFilter)
new AddingProcessedRowToStateCompletionIterator(key, thisRow, outputIter)
} else {
joinSide match {
case LeftSide if joinType == LeftOuter =>
Iterator(generateJoinedRow(thisRow, nullRight))
case RightSide if joinType == RightOuter =>
Iterator(generateJoinedRow(thisRow, nullLeft))
case _ => Iterator()
}
}
}
}
private class AddingProcessedRowToStateCompletionIterator(
key: UnsafeRow,
thisRow: UnsafeRow,
subIter: Iterator[JoinedRow])
extends CompletionIterator[JoinedRow, Iterator[JoinedRow]](subIter) {
private val iteratorNotEmpty: Boolean = super.hasNext
override def completion(): Unit = {
val shouldAddToState = // add only if both removal predicates do not match
!stateKeyWatermarkPredicateFunc(key) && !stateValueWatermarkPredicateFunc(thisRow)
if (shouldAddToState) {
joinStateManager.append(key, thisRow, matched = iteratorNotEmpty)
updatedStateRowsCount += 1
}
}
}
/**
* Get an iterator over the values stored in this joiner's state manager for the given key.
*
* Should not be interleaved with mutations.
*/
def get(key: UnsafeRow): Iterator[UnsafeRow] = {
joinStateManager.get(key)
}
/**
* Builds an iterator over old state key-value pairs, removing them lazily as they're produced.
*
* @note This iterator must be consumed fully before any other operations are made
* against this joiner's join state manager. For efficiency reasons, the intermediate states of
* the iterator leave the state manager in an undefined state.
*
* We do this to avoid requiring either two passes or full materialization when
* processing the rows for outer join.
*/
def removeOldState(): Iterator[KeyToValuePair] = {
stateWatermarkPredicate match {
case Some(JoinStateKeyWatermarkPredicate(expr)) =>
joinStateManager.removeByKeyCondition(stateKeyWatermarkPredicateFunc)
case Some(JoinStateValueWatermarkPredicate(expr)) =>
joinStateManager.removeByValueCondition(stateValueWatermarkPredicateFunc)
case _ => Iterator.empty
}
}
/** Commit changes to the buffer state and return the state store metrics */
def commitStateAndGetMetrics(): StateStoreMetrics = {
joinStateManager.commit()
joinStateManager.metrics
}
def numUpdatedStateRows: Long = updatedStateRowsCount
}
}
|
caneGuy/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamingSymmetricHashJoinExec.scala
|
Scala
|
apache-2.0
| 27,415 |
package io.iohk.ethereum.consensus.validators
import io.iohk.ethereum.consensus.validators.BlockHeaderError._
import io.iohk.ethereum.domain.BlockHeader
import io.iohk.ethereum.ledger.BloomFilter
import io.iohk.ethereum.utils.{BlockchainConfig, ByteStringUtils}
/** Validator specialized for blocks that carry a checkpoint.
  *
  * @param blockchainConfig blockchain configuration providing the known checkpoint public keys
  *                         and the minimum number of required checkpoint signatures
  */
class BlockWithCheckpointHeaderValidator(blockchainConfig: BlockchainConfig) {
def validate(blockHeader: BlockHeader, parentHeader: BlockHeader): Either[BlockHeaderError, BlockHeaderValid] = {
for {
_ <- validateLexicographicalOrderOfSignatures(blockHeader)
_ <- validateCheckpointSignatures(blockHeader, parentHeader)
_ <- validateEmptyFields(blockHeader)
_ <- validateFieldsCopiedFromParent(blockHeader, parentHeader)
_ <- validateGasUsed(blockHeader)
_ <- validateTimestamp(blockHeader, parentHeader)
_ <- validateTreasuryOptOut(blockHeader)
} yield BlockHeaderValid
}
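  /**
   * Validates that the checkpoint signatures are stored in ascending lexicographical order,
   * as defined by ECDSASignatureOrdering, failing with HeaderInvalidOrderOfCheckpointSignatures otherwise.
   */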
private def validateLexicographicalOrderOfSignatures(
header: BlockHeader
): Either[BlockHeaderError, BlockHeaderValid] = {
import io.iohk.ethereum.crypto.ECDSASignatureImplicits.ECDSASignatureOrdering
header.checkpoint
.map { checkpoint =>
if (checkpoint.signatures == checkpoint.signatures.sorted) {
Right(BlockHeaderValid)
} else Left(HeaderInvalidOrderOfCheckpointSignatures)
}
.getOrElse(Left(BlockWithCheckpointHeaderValidator.NoCheckpointInHeaderError))
}
/**
* Validates [[io.iohk.ethereum.domain.BlockHeader.checkpoint]] signatures
*
* @param blockHeader BlockHeader to validate.
* @param parentHeader BlockHeader of the parent of the block to validate.
* @return BlockHeader if valid, an [[io.iohk.ethereum.consensus.validators.BlockHeaderError.HeaderInvalidCheckpointSignatures]] otherwise
*/
private def validateCheckpointSignatures(
blockHeader: BlockHeader,
parentHeader: BlockHeader
): Either[BlockHeaderError, BlockHeaderValid] = {
blockHeader.checkpoint
.map { checkpoint =>
lazy val signaturesWithRecoveredKeys = checkpoint.signatures.map(s => s -> s.publicKey(parentHeader.hash))
// if at least 2 different signatures came from the same signer it will be in this set (also takes care
// of duplicate signatures)
lazy val repeatedSigners = signaturesWithRecoveredKeys
.groupBy(_._2)
.filter(_._2.size > 1)
.keySet
.flatten
lazy val (validSignatures, invalidSignatures) = signaturesWithRecoveredKeys.partition {
//signatures are valid if the signers are known AND distinct
case (sig, Some(pk)) => blockchainConfig.checkpointPubKeys.contains(pk) && !repeatedSigners.contains(pk)
case _ => false
}
// we fail fast if there are too many signatures (DoS protection)
if (checkpoint.signatures.size > blockchainConfig.checkpointPubKeys.size)
Left(HeaderWrongNumberOfCheckpointSignatures(checkpoint.signatures.size))
else if (invalidSignatures.nonEmpty) {
val sigsWithKeys = invalidSignatures.map { case (sig, maybePk) =>
(sig, maybePk.map(ByteStringUtils.hash2string))
}
Left(HeaderInvalidCheckpointSignatures(sigsWithKeys))
} else if (validSignatures.size < blockchainConfig.minRequireSignatures)
Left(HeaderWrongNumberOfCheckpointSignatures(validSignatures.size))
else
Right(BlockHeaderValid)
}
.getOrElse(Left(BlockWithCheckpointHeaderValidator.NoCheckpointInHeaderError))
}
/**
* Validates emptiness of:
* - beneficiary
* - extraData
* - ommersHash
* - transactionsRoot
* - receiptsRoot
* - logsBloom
* - nonce
* - mixHash
*
* @param blockHeader BlockHeader to validate.
* @return BlockHeader if valid, an [[io.iohk.ethereum.consensus.validators.BlockHeaderError.HeaderFieldNotEmptyError]] otherwise
*/
private def validateEmptyFields(blockHeader: BlockHeader): Either[BlockHeaderError, BlockHeaderValid] = {
if (blockHeader.beneficiary != BlockHeader.EmptyBeneficiary)
notEmptyFieldError("beneficiary")
else if (blockHeader.ommersHash != BlockHeader.EmptyOmmers)
notEmptyFieldError("ommersHash")
else if (blockHeader.transactionsRoot != BlockHeader.EmptyMpt)
notEmptyFieldError("transactionsRoot")
else if (blockHeader.receiptsRoot != BlockHeader.EmptyMpt)
notEmptyFieldError("receiptsRoot")
else if (blockHeader.logsBloom != BloomFilter.EmptyBloomFilter)
notEmptyFieldError("logsBloom")
else if (blockHeader.extraData.nonEmpty)
notEmptyFieldError("extraData")
else if (blockHeader.nonce.nonEmpty)
notEmptyFieldError("nonce")
else if (blockHeader.mixHash.nonEmpty)
notEmptyFieldError("mixHash")
else Right(BlockHeaderValid)
}
private def notEmptyFieldError(field: String) = Left(HeaderFieldNotEmptyError(s"$field is not empty"))
/**
* Validates fields which should be equal to parent equivalents:
* - stateRoot
*
* @param blockHeader BlockHeader to validate.
* @param parentHeader BlockHeader of the parent of the block to validate.
* @return BlockHeader if valid, an [[io.iohk.ethereum.consensus.validators.BlockHeaderError.HeaderNotMatchParentError]] otherwise
*/
private def validateFieldsCopiedFromParent(
blockHeader: BlockHeader,
parentHeader: BlockHeader
): Either[BlockHeaderError, BlockHeaderValid] = {
if (blockHeader.stateRoot != parentHeader.stateRoot)
fieldNotMatchedParentFieldError("stateRoot")
else if (blockHeader.gasLimit != parentHeader.gasLimit)
fieldNotMatchedParentFieldError("gasLimit")
else if (blockHeader.difficulty != parentHeader.difficulty)
fieldNotMatchedParentFieldError("difficulty")
else Right(BlockHeaderValid)
}
private def fieldNotMatchedParentFieldError(field: String) =
    Left(HeaderNotMatchParentError(s"$field has a different value than the corresponding parent field"))
/**
* Validates gasUsed equal to zero
* @param blockHeader BlockHeader to validate.
* @return BlockHeader if valid, an [[io.iohk.ethereum.consensus.validators.BlockHeaderError.HeaderGasUsedError]] otherwise
*/
private def validateGasUsed(blockHeader: BlockHeader): Either[BlockHeaderError, BlockHeaderValid] = {
if (blockHeader.gasUsed != BigInt(0)) Left(HeaderGasUsedError)
else Right(BlockHeaderValid)
}
/**
   * Validates that [[io.iohk.ethereum.domain.BlockHeader.unixTimestamp]] is exactly one greater than the parent's unixTimestamp
*
* @param blockHeader BlockHeader to validate.
* @param parentHeader BlockHeader of the parent of the block to validate.
* @return BlockHeader if valid, an [[HeaderTimestampError]] otherwise
*/
private def validateTimestamp(
blockHeader: BlockHeader,
parentHeader: BlockHeader
): Either[BlockHeaderError, BlockHeaderValid] =
if (blockHeader.unixTimestamp == parentHeader.unixTimestamp + 1) Right(BlockHeaderValid)
else Left(HeaderTimestampError)
private def validateTreasuryOptOut(blockHeader: BlockHeader): Either[BlockHeaderError, BlockHeaderValid] =
if (blockHeader.treasuryOptOut.contains(false)) Right(BlockHeaderValid)
else Left(CheckpointHeaderTreasuryOptOutError)
}
object BlockWithCheckpointHeaderValidator {
val NoCheckpointInHeaderError: BlockHeaderError = HeaderUnexpectedError(
"Attempted to validate a checkpoint on a block without a checkpoint"
)
}
|
input-output-hk/etc-client
|
src/main/scala/io/iohk/ethereum/consensus/validators/BlockWithCheckpointHeaderValidator.scala
|
Scala
|
mit
| 7,605 |
package com.wavesplatform.state.diffs.ci
import com.wavesplatform.account.Address
import com.wavesplatform.db.WithState.AddrWithBalance
import com.wavesplatform.db.{DBCacheSettings, WithDomain, WithState}
import com.wavesplatform.lang.directives.DirectiveDictionary
import com.wavesplatform.lang.directives.values.{StdLibVersion, V4, V5}
import com.wavesplatform.lang.script.Script
import com.wavesplatform.lang.v1.compiler.TestCompiler
import com.wavesplatform.test.{PropSpec, produce}
import com.wavesplatform.transaction.Asset.IssuedAsset
import com.wavesplatform.transaction.smart.InvokeScriptTransaction
import com.wavesplatform.transaction.smart.InvokeScriptTransaction.Payment
import com.wavesplatform.transaction.{Transaction, TxHelpers}
import org.scalatest.{EitherValues, Inside}
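// Verifies the cap on payments attached to an invocation: 2 payments on V4 and 10 on V5+, both for
// payments attached directly to the transaction and (on V5+) for payments of a nested invoke call.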
class InvokePaymentsLimitTest extends PropSpec with Inside with WithState with DBCacheSettings with WithDomain with EitherValues {
import DomainPresets._
private def dApp(version: StdLibVersion, nestedInvoke: Option[(Address, Seq[Payment])]): Script = {
val nested = nestedInvoke.fold("") {
case (address, payments) =>
val paymentsStr = payments.map(p => s"AttachedPayment(base58'${p.assetId}', ${p.amount})").mkString("[", ", ", "]")
s""" strict r = invoke(Address(base58'$address'), "default", [], $paymentsStr) """
}
TestCompiler(version).compileContract(
s"""
| @Callable(i)
| func default() = {
| $nested
| []
| }
""".stripMargin
)
}
private def scenario(version: StdLibVersion, paymentsCount: Int, nested: Boolean): (Seq[AddrWithBalance], Seq[Transaction], InvokeScriptTransaction) = {
val invoker = TxHelpers.signer(0)
val dApp1 = TxHelpers.signer(1)
val dApp2 = TxHelpers.signer(2)
val balances = AddrWithBalance.enoughBalances(invoker, dApp1, dApp2)
val issues = (1 to paymentsCount).map(_ => TxHelpers.issue(if (nested) dApp1 else invoker, 100))
val (nestedInvoke, txPayments) = {
val payments = issues.map(i => Payment(1, IssuedAsset(i.id.value())))
if (nested)
(Some((dApp2.toAddress, payments)), Nil)
else
(None, payments)
}
val ssTx = TxHelpers.setScript(dApp1, dApp(version, nestedInvoke))
val ssTx2 = TxHelpers.setScript(dApp2, dApp(version, None))
val invokeTx = TxHelpers.invoke(dApp1.toAddress, payments = txPayments)
(balances, Seq(ssTx, ssTx2) ++ issues, invokeTx)
}
private def assertLimit(version: StdLibVersion, count: Int, nested: Boolean) = {
val (balances1, preparingTxs, invoke) = scenario(version, count, nested)
withDomain(RideV5, balances1) { d =>
d.appendBlock(preparingTxs: _*)
d.appendBlock(invoke)
d.blockchain.transactionSucceeded(invoke.id.value()) shouldBe true
}
val (balances2, preparingTxs2, invoke2) = scenario(version, count + 1, nested)
withDomain(RideV5, balances2) { d =>
d.appendBlock(preparingTxs2: _*)
d.appendBlockE(invoke2) should produce(s"Script payment amount=${count + 1} should not exceed $count")
}
}
property("payments limit") {
assertLimit(V4, 2, nested = false)
DirectiveDictionary[StdLibVersion].all.filter(_ >= V5).foreach(assertLimit(_, 10, nested = false))
DirectiveDictionary[StdLibVersion].all.filter(_ >= V5).foreach(assertLimit(_, 10, nested = true))
}
}
|
wavesplatform/Waves
|
node/src/test/scala/com/wavesplatform/state/diffs/ci/InvokePaymentsLimitTest.scala
|
Scala
|
mit
| 3,397 |
package com.socrata.util.locks
package simple
import scala.{collection => sc}
import sc.{mutable => scm}
import org.joda.time.DateTime
private [simple] class LockStructure {
var heldBy: Thread = null
var holdCount: Int = 0
var heldSince: DateTime = null
val waiters = new scm.HashSet[Thread]
def acquiredBy(newOwner: Thread): Unit = {
// PRECONDITION: either I own the lock's monitor, or I hold the only reference to it.
heldBy = newOwner
holdCount += 1
heldSince = new DateTime
waiters.remove(newOwner)
}
}
class SimpleLocker extends Locker {
private val locks = new scm.HashMap[String, LockStructure]
// Locking protocol:
// * If I hold both this object's monitor and a LockStructure's, I took this's first.
// * If I sleep on a lock, I do not hold this object's monitor
def lock(lockId: String, maxWaitLength: Int, timeout: Long) = doLock(lockId, maxWaitLength, Locker.deadlineForTimeout(timeout))
def lockHeld(lockId: String): Boolean = {
validateLockId(lockId)
synchronized { locks.contains(lockId) }
}
def lockHeldSince(lockId: String): Option[DateTime] = {
validateLockId(lockId)
synchronized { locks.get(lockId).map(_.heldSince) }
}
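  // Acquires the named lock (re-entrantly for the holding thread), waiting until `deadline`:
  // returns Locked on success, TooManyWaiters if the wait queue already holds `maxWaitLength`
  // threads, or TooLongWait when the deadline passes first.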
private def doLock(lockId: String, maxWaitLength: Int, deadline: Long): LockResult = {
validateLockId(lockId)
val self = Thread.currentThread()
val lock = synchronized {
locks.get(lockId) match {
case None =>
// it doesn't exist, so I need to create and take it all in one step
val lock = new LockStructure
locks(lockId) = lock
lock.acquiredBy(self)
return Locked(new SimpleUnlocker(lockId, lock))
case Some(lock) =>
lock.synchronized {
if(!lock.waiters(self)) {
if(lock.heldBy != self && lock.waiters.size >= maxWaitLength) return TooManyWaiters
// prevent it from being removed from the hashset if someone else
// unlocks it right this instant.
lock.waiters.add(self)
}
}
lock
}
}
lock.synchronized {
def canOwnLock() = lock.heldBy == null || self == lock.heldBy
while(!canOwnLock() && (deadline - System.currentTimeMillis > 0))
lock.wait(math.max(deadline - System.currentTimeMillis, 1L)) // what to do if interrupted...?
if(!canOwnLock()) {
lock.waiters -= self
return TooLongWait
}
lock.acquiredBy(self)
}
Locked(new SimpleUnlocker(lockId, lock));
}
private def unlock(lockId: String, lock: LockStructure): Unit = {
// I won't be sleeping, but I might be modifying the "locks" map, so I
// need to take this object's monitor first
synchronized {
lock.synchronized {
lock.holdCount -= 1
if(lock.holdCount == 0) {
if(lock.waiters.isEmpty) {
locks -= lockId
} else {
lock.heldBy = null
lock.notify()
}
}
}
}
}
private class SimpleUnlocker(lockId: String, var lock: LockStructure) extends Unlocker {
def unlock(): Unit = {
SimpleLocker.this.unlock(lockId, lock)
lock = null // make improper (i.e., multiple) use of an Unlocker fail fast
}
}
}
|
socrata-platform/socrata-utils
|
src/main/scala/com/socrata/util/locks/simple/SimpleLocker.scala
|
Scala
|
apache-2.0
| 3,295 |
/* ___ _ ___ _ _ *\\
** / __| |/ (_) | | The SKilL Generator **
** \\__ \\ ' <| | | |__ (c) 2013 University of Stuttgart **
** |___/_|\\_\\_|_|____| see LICENSE **
\\* */
package de.ust.skill.generator.ada.internal
import de.ust.skill.generator.ada.GeneralOutputMaker
trait ByteReaderBodyMaker extends GeneralOutputMaker {
abstract override def make {
super.make
val out = open(s"""${packagePrefix}-api-internal-byte_reader.adb""")
out.write(s"""
package body ${packagePrefix.capitalize}.Api.Internal.Byte_Reader is
procedure Read_Buffer (
Stream : not null access Ada.Streams.Root_Stream_Type'Class;
Item : out Buffer
) is
use Ada.Streams;
Buffer : Stream_Element_Array (1 .. Stream_Element_Offset (Buffer_Size));
Last : Stream_Element_Offset;
begin
Stream.Read (Buffer, Last);
Buffer_Last := Positive (Last);
for I in 1 .. Last loop
Item (Integer (I)) := Byte (Buffer (I));
end loop;
end Read_Buffer;
procedure Reset_Buffer is
begin
Byte_Reader.Buffer_Index := Byte_Reader.Buffer_Size;
end Reset_Buffer;
function End_Of_Buffer return Boolean is
(Byte_Reader.Buffer_Index >= Byte_Reader.Buffer_Last);
function Read_Byte (Stream : ASS_IO.Stream_Access) return Byte is
begin
if Buffer_Size = Buffer_Index then
Buffer'Read (Stream, Buffer_Array);
Buffer_Index := 0;
end if;
Buffer_Index := Buffer_Index + 1;
declare
Next : Byte := Buffer_Array (Buffer_Index);
begin
return Next;
end;
end Read_Byte;
function Read_i8 (Stream : ASS_IO.Stream_Access) return i8 is
function Convert is new Ada.Unchecked_Conversion (Byte, i8);
begin
return Convert (Read_Byte (Stream));
end Read_i8;
function Read_i16 (Stream : ASS_IO.Stream_Access) return i16 is
A : i16 := i16 (Read_Byte (Stream));
B : i16 := i16 (Read_Byte (Stream));
begin
return A * (2 ** 8) +
B;
end Read_i16;
function Read_i32 (Stream : ASS_IO.Stream_Access) return i32 is
A : i32 := i32 (Read_Byte (Stream));
B : i32 := i32 (Read_Byte (Stream));
C : i32 := i32 (Read_Byte (Stream));
D : i32 := i32 (Read_Byte (Stream));
begin
return A * (2 ** 24) +
B * (2 ** 16) +
C * (2 ** 8) +
D;
end Read_i32;
function Read_i64 (Stream : ASS_IO.Stream_Access) return i64 is
A : i64 := i64 (Read_Byte (Stream));
B : i64 := i64 (Read_Byte (Stream));
C : i64 := i64 (Read_Byte (Stream));
D : i64 := i64 (Read_Byte (Stream));
E : i64 := i64 (Read_Byte (Stream));
F : i64 := i64 (Read_Byte (Stream));
G : i64 := i64 (Read_Byte (Stream));
H : i64 := i64 (Read_Byte (Stream));
begin
return A * (2 ** 56) +
B * (2 ** 48) +
C * (2 ** 40) +
D * (2 ** 32) +
E * (2 ** 24) +
F * (2 ** 16) +
G * (2 ** 8) +
H;
end Read_i64;
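   --  Reads a SKilL v64 (variable-length 64-bit integer): up to nine bytes, seven
   --  payload bits per byte in little-endian order; a set high bit means another
   --  byte follows, and a ninth byte contributes all eight of its bits.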
function Read_v64 (Stream : ASS_IO.Stream_Access) return v64 is
use Interfaces;
function Convert is new Ada.Unchecked_Conversion (Unsigned_64, v64);
Count : Natural := 0;
Return_Value : Unsigned_64 := 0;
Bucket : Unsigned_64 := Unsigned_64 (Read_Byte (Stream));
begin
while (Count < 8 and then 0 /= (Bucket and 16#80#)) loop
Return_Value := Return_Value or ((Bucket and 16#7f#) * (2 ** (7 * Count)));
Count := Count + 1;
Bucket := Unsigned_64 (Read_Byte (Stream));
end loop;
case Count is
when 8 => Return_Value := Return_Value or (Bucket * (2 ** (7 * Count)));
when others => Return_Value := Return_Value or ((Bucket and 16#7f#) * (2 ** (7 * Count)));
end case;
return Convert (Return_Value);
end Read_v64;
function Read_f32 (Stream : ASS_IO.Stream_Access) return f32 is
function Convert is new Ada.Unchecked_Conversion (i32, f32);
A : i32 := Read_i32 (Stream);
begin
return Convert (A);
end Read_f32;
function Read_f64 (Stream : ASS_IO.Stream_Access) return f64 is
function Convert is new Ada.Unchecked_Conversion (i64, f64);
A : i64 := Read_i64 (Stream);
begin
return Convert (A);
end Read_f64;
function Read_Boolean (Stream : ASS_IO.Stream_Access) return Boolean is
      Unexpected_Value : exception;
   begin
      case Read_Byte (Stream) is
         when 16#ff# => return True;
         when 16#00# => return False;
         when others => raise Unexpected_Value;
end case;
end Read_Boolean;
function Read_String (
Stream : ASS_IO.Stream_Access;
Length : i32
) return String is
New_String : String (1 .. Integer (Length));
begin
for I in Integer range 1 .. Integer (Length) loop
New_String (I) := Character'Val (Read_Byte (Stream));
end loop;
return New_String;
end Read_String;
procedure Skip_Bytes (
Stream : ASS_IO.Stream_Access;
Length : Long
) is
begin
for I in 1 .. Length loop
declare
Skip : Byte := Read_Byte (Stream);
begin
null;
end;
end loop;
end Skip_Bytes;
end ${packagePrefix.capitalize}.Api.Internal.Byte_Reader;
""")
out.close()
}
}
|
XyzNobody/skill
|
src/main/scala/de/ust/skill/generator/ada/api/internal/ByteReaderBodyMaker.scala
|
Scala
|
bsd-3-clause
| 5,624 |
package akka.persistence.eventstore.journal
import akka.persistence.journal.JournalSpec
import akka.persistence.eventstore.EventStorePluginSpec
class JournalIntegrationSpec extends JournalSpec with EventStorePluginSpec
|
petervdm/EventStore.Akka.Persistence
|
src/test/scala/akka/persistence/eventstore/journal/JournalIntegrationSpec.scala
|
Scala
|
bsd-3-clause
| 220 |
/*
* Copyright 2015 Paul Horn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.knutwalker.ntparser
class ParseError(t: String) extends Exception(t)
|
knutwalker/NtParser
|
core/src/main/scala/de/knutwalker/ntparser/ParseError.scala
|
Scala
|
apache-2.0
| 675 |
/*
* This software is licensed under the GNU Affero General Public License, quoted below.
*
* This file is a part of PowerAPI.
*
* Copyright (C) 2011-2016 Inria, University of Lille 1.
*
* PowerAPI is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* PowerAPI is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with PowerAPI.
*
* If not, please consult http://www.gnu.org/licenses/agpl-3.0.html.
*/
package org.powerapi.core.target
import scala.concurrent.duration.DurationInt
import akka.util.Timeout
import org.powerapi.UnitTest
class TargetSuite extends UnitTest {
val timeout = Timeout(1.seconds)
override def afterAll() = {
system.terminate()
}
"The implicit methods" should "convert an int to a Process and a string to an Application" in {
val process: Target = 1
process should equal(Process(1))
val application: Target = "app"
application should equal(Application("app"))
}
}
|
Spirals-Team/powerapi
|
powerapi-core/src/test/scala/org/powerapi/core/target/TargetSuite.scala
|
Scala
|
agpl-3.0
| 1,400 |
package at.forsyte.apalache.tla.bmcmt.caches
import at.forsyte.apalache.tla.bmcmt.StackableContext
import at.forsyte.apalache.tla.bmcmt.rewriter.Recoverable
import scala.collection.immutable.HashMap
/**
* A stackable cache that allows one to store values and retrieve them later.
*
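 * Entries remember the context level at which they were stored, so a later `pop` discards
 * everything added after the matching `push`. Illustrative usage (not taken from the sources):
 *
 * {{{
 *   val cache = new SimpleCache[String, Int]
 *   cache.put("a", 1)
 *   cache.push()
 *   cache.put("b", 2)
 *   cache.pop()   // back to level 0: "b" is dropped, "a" survives
 *   assert(cache.get("b").isEmpty && cache.get("a").contains(1))
 * }}}
 *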
* @author Igor Konnov
*/
class SimpleCache[KeyT, ValueT] extends StackableContext with Recoverable[SimpleCacheSnapshot[KeyT, ValueT]] {
/**
* A context level, see StackableContext
*/
protected var level: Int = 0
// cache values
protected var cache: Map[KeyT, (ValueT, Int)] = HashMap()
def values(): Iterable[ValueT] = {
cache.map(_._2._1)
}
/**
* Put a value into the cache.
*
* @param key a key
* @param value a value
*/
def put(key: KeyT, value: ValueT): Unit = {
cache += (key -> (value, level))
}
/**
   * Get a previously cached value for a given key, if there is one. Otherwise, return None.
*
* @param key a key
* @return Some(value) if there is a value matching the key, or None otherwise
*/
def get(key: KeyT): Option[ValueT] = {
cache.get(key) match {
case Some((target, _)) => Some(target)
case None => None
}
}
/**
* Take a snapshot and return it
*
* @return the snapshot
*/
override def snapshot(): SimpleCacheSnapshot[KeyT, ValueT] = {
val squashedCache = cache.map { case (key, (value, _)) => (key, (value, 0)) }
new SimpleCacheSnapshot(squashedCache)
}
/**
* Recover a previously saved snapshot (not necessarily saved by this object).
*
* @param shot a snapshot
*/
override def recover(shot: SimpleCacheSnapshot[KeyT, ValueT]): Unit = {
cache = shot.cache
}
/**
* Get the current context level, that is the difference between the number of pushes and pops made so far.
*
* @return the current level, always non-negative.
*/
override def contextLevel: Int = level
/**
* Save the current context and push it on the stack for a later recovery with pop.
*/
override def push(): Unit = {
level += 1
}
/**
* Pop the previously saved context. Importantly, pop may be called multiple times and thus it is not sufficient
* to save only the latest context.
*/
override def pop(): Unit = {
pop(1)
}
/**
* Pop the context as many times as needed to reach a given level.
*
* @param n the number of times to call pop
*/
override def pop(n: Int): Unit = {
assert(level >= n)
level -= n
def isEntryOld(mapEntry: (KeyT, (ValueT, Int))): Boolean =
mapEntry._2._2 <= level
cache = cache filter isEntryOld
}
/**
* Clean the context.
*/
override def dispose(): Unit = {
cache = Map()
level = 0
}
}
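// Editor's note: an illustrative usage sketch, not part of the original file, showing how the
// context level interacts with put/push/pop (entries stored above the current level are evicted
// on pop):
//   val cache = new SimpleCache[String, Int]
//   cache.put("a", 1)   // stored at level 0
//   cache.push()        // level becomes 1
//   cache.put("b", 2)   // stored at level 1
//   cache.pop()         // back to level 0: "b" is evicted, "a" survives
//   cache.get("a")      // Some(1)
//   cache.get("b")      // None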
|
konnov/dach
|
tla-bmcmt/src/main/scala/at/forsyte/apalache/tla/bmcmt/caches/SimpleCache.scala
|
Scala
|
apache-2.0
| 2,790 |
package bank2
import akka.actor.{ ActorSystem, Actor, Props, ActorRef }
import akka.bita.RandomScheduleHelper
import akka.bita.pattern.Patterns._
import akka.util.duration._
import org.scalatest._
import akka.testkit.TestProbe
import util.BitaTests
class BankSpec extends BitaTests {
// feel free to change these parameters to test the bank with various configurations.
override def name = "bank2"
// Are we expecting certain schedules to fail?
override def expectFailures = true
// delay between start and end message
override def delay = 0
def run {
system = ActorSystem("System")
if (random) {
RandomScheduleHelper.setMaxDelay(250) // Increase the delay between messages to 250 ms
RandomScheduleHelper.setSystem(system)
}
try {
val probe = new TestProbe(system) // Use a TestProbe to represent the tests.
var bank = system.actorOf(Bank(delay, probe.ref), "Bank") // A bank without delay between messages.
probe.send(bank, Start) // Start the simulation
val amount = probe.expectMsgType[Int](timeout.duration)
if (amount > 0) {
println(Console.GREEN + Console.BOLD+"**SUCCESS** Charlie has %d on his account".format(amount) + Console.RESET)
bugDetected = false
} else {
println(Console.RED + Console.BOLD+"**FAILURE** Charlie has %d on his account".format(amount) + Console.RESET)
bugDetected = true
}
} catch {
case e: AssertionError => {
bugDetected = true
println(Console.YELLOW + Console.BOLD+"**WARNING** %s".format(e.getMessage()) + Console.RESET)
}
case e: java.util.concurrent.TimeoutException => {
bugDetected = true
println(Console.YELLOW + Console.BOLD+"**WARNING** %s".format(e.getMessage()) + Console.RESET)
}
}
}
}
|
Tjoene/thesis
|
benchmark/src/test/scala/bank2/BankSpec.scala
|
Scala
|
gpl-2.0
| 2,005 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.knockdata.spark.highcharts
import plotoptions._
object PlotOptions {
// get a new area plotOptions
def area = new Area
// get a new arearange plotOptions
def arearange = new AreaRange
// get a new areaspline plotOptions
def areaspline = new AreaSpline
// get a new areasplinerange plotOptions
def areasplinerange = new AreaSplineRange
// get a new bar plotOptions
def bar = new Bar
// get a new boxplot plotOptions
def boxplot = new BoxPlot
// get a new bubble plotOptions
def bubble = new Bubble
// get a new column plotOptions
def column = new Column
// get a new errorbar plotOptions
def errorbar = new ErrorBar
// get a new funnel plotOptions
def funnel = new Funnel
// get a new gauge plotOptions
def gauge = new Gauge
// get a new heatmap plotOptions
def heatmap = new HeatMap
// get a new line plotOptions
def line = new Line
// get a new pie plotOptions
def pie = new Pie
// get a new polygon plotOptions
def polygon = new Polygon
// get a new pyramid plotOptions
def pyramid = new Pyramid
// get a new scatter plotOptions
def scatter = new Scatter
// get a new series plotOptions, the series plotOption apply on all chart type
def series = new Series
// get a new solidgauge plotOptions
def solidgauge = new SolidGauge
// get a new spline plotOptions
def spline = new Spline
// get a new treemap plotOptions
def treemap = new TreeMap
// get a new waterfall plotOptions
def waterfall = new Waterfall
}
|
knockdata/spark-highcharts
|
src/main/scala/com/knockdata/spark/highcharts/PlotOptions.scala
|
Scala
|
apache-2.0
| 2,320 |
/*
* Copyright 2016-2018 SN127.fi
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package fi.sn127.tackler.model
import java.time.ZonedDateTime
import java.time.format.{DateTimeFormatter, DateTimeFormatterBuilder, SignStyle}
import java.time.temporal.ChronoField.DAY_OF_WEEK
import java.time.temporal.IsoFields
object TxnTS {
private val frmtISOWeek = new DateTimeFormatterBuilder()
.appendValue(IsoFields.WEEK_BASED_YEAR, 4, 10, SignStyle.EXCEEDS_PAD)
.appendLiteral("-W")
.appendValue(IsoFields.WEEK_OF_WEEK_BASED_YEAR, 2)
.appendOffset("+HH:MM", "Z")
.toFormatter
// no zoneId as with ISO_WEEK_DATE
// no localized day number as with 'e' (e.g. en_US => sunday == 1)
private val frmtISOWeekDate = new DateTimeFormatterBuilder()
.appendValue(IsoFields.WEEK_BASED_YEAR, 4, 10, SignStyle.EXCEEDS_PAD)
.appendLiteral("-W")
.appendValue(IsoFields.WEEK_OF_WEEK_BASED_YEAR, 2)
.appendLiteral('-')
.appendValue(DAY_OF_WEEK, 1)
.appendOffset("+HH:MM", "Z")
.toFormatter
/**
* ISO-8601 Timestamp with offset.
*
* @return ISO-8601 date-time: 2016-12-17T12:31:12+03:00
*/
def isoZonedTS(ts: ZonedDateTime): String = {
ts.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME)
}
/**
* ISO-8601 date with offset.
*
* @return ISO-8601 date: 2016-12-17T12:31:12+03:00 => 2016-12-17+03:00
*/
def isoDate(ts: ZonedDateTime): String = {
ts.format(DateTimeFormatter.ISO_OFFSET_DATE)
}
/**
* ISO-8601 year with offset.
*
* @param ts timestamp
* @return ISO-8601 date: 2016-12-17T12:31:12+03:00 => 2016+03:00
*/
def isoYear(ts: ZonedDateTime): String = {
ts.format(DateTimeFormatter.ofPattern("yyyyXXX"))
}
/**
* ISO-8601 month with offset.
*
* @param ts timestamp
* @return ISO-8601 date: 2016-12-17T12:31:12+03:00 => 2016-12+03:00
*/
def isoMonth(ts: ZonedDateTime): String = {
ts.format(DateTimeFormatter.ofPattern("yyyy'-'MMXXX"))
}
/**
* ISO-8601 Week with offset.
*
* @return ISO-8601 week (without date): 2010-01-01 => 2009-W53+03:00
*/
def isoWeek(ts: ZonedDateTime): String = {
ts.format(frmtISOWeek)
}
/**
* ISO-8601 Week date with offset.
*
* @return ISO-8601 week date: 2010-01-01 => 2009-W53-5+03:00
*/
def isoWeekDate(ts: ZonedDateTime): String = {
ts.format(frmtISOWeekDate)
}
}
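// Editor's note: an illustrative sketch, not part of the original file. Assuming
//   val ts = java.time.ZonedDateTime.parse("2016-12-17T12:31:12+03:00")
// the formatters above yield:
//   TxnTS.isoZonedTS(ts)  // "2016-12-17T12:31:12+03:00"
//   TxnTS.isoDate(ts)     // "2016-12-17+03:00"
//   TxnTS.isoMonth(ts)    // "2016-12+03:00"
//   TxnTS.isoYear(ts)     // "2016+03:00"
//   TxnTS.isoWeek(ts)     // "2016-W50+03:00"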
|
jaa127/tackler
|
core/src/main/scala/fi/sn127/tackler/model/TxnTS.scala
|
Scala
|
apache-2.0
| 2,886 |
package notebook
import notebook.util.Match
import org.junit.runner._
import org.specs2.mutable._
import org.specs2.runner._
@RunWith(classOf[JUnitRunner])
class PresentationCompilerTests extends Specification {
def complete(pc:PresentationCompiler)(s:String, i:Int) = {
val (st, com) = pc.complete(s, i)
(st, com.toSet)
}
"complete" should {
val cz = """|
|class AnExample(val aField:Int, notAField:Boolean=true) {
| def testVar:String = ""
| def testMethod(a:String):String = ""
| def testMethod(a:String, b:String):String = ""
| def testMethod(a:Int, optionalB: String = ""):String = ""
| lazy val toSchwarz:Float = 1f
|}
|implicit class AnExampleWithImplicits(cls: AnExample) {
| def implicitMethod(a: Int): Int = 1
|}
|""".stripMargin
val newInst = "val test = new AnExample(123)"
val newLine = "\n"
"return the correct completions" in {
if ( sys.env.contains("SKIP_WHEN_TRAVIS") ) {
skipped(": Test skipped on CI, causes StackOverflowError (REPL compiler bug).")
}
val line = "test.toS"
val code = List(newInst, newLine, line).mkString
val pc = new PresentationCompiler(Nil)
pc.addScripts(cz)
val c = complete(pc) _
c(code, code.size) must beEqualTo("toS", Set(
Match("toSchwarz", Map("display_text" -> "toSchwarz: Float")),
Match("toString", Map("display_text" -> "toString: String"))
))
val r = c(code + "\nval testAsSt$ring = test.toString()", code.size) must beEqualTo("toS", Set(
Match("toSchwarz", Map("display_text" -> "toSchwarz: Float")),
Match("toString", Map("display_text" -> "toString: String"))
))
pc.stop()
r
}
"lists all overrided method versions, indicating optional parameters if any" in {
val line = "test.testMeth"
val code = List(newInst, newLine, line).mkString
val pc = new PresentationCompiler(Nil)
pc.addScripts(cz)
val c = complete(pc) _
val r = c(code, code.size) must beEqualTo("testMeth", Set(
Match("testMethod(a: Int, [optionalB: String])",
Map("display_text" -> "testMethod(a: Int, [optionalB: String]): String")),
Match("testMethod(a: String)", Map("display_text" -> "testMethod(a: String): String")),
Match("testMethod(a: String, b: String)", Map("display_text" -> "testMethod(a: String, b: String): String"))
))
pc.stop()
r
}
"lists the methods inherited and the implicit methods" in {
if ( sys.env.contains("SKIP_WHEN_TRAVIS") ) {
// Compiler exception during call to 'ask' (PresentationCompiler.scala:59)
// at scala.tools.nsc.interactive.Global.pollForWork(Global.scala:324)
skipped(": Test skipped on CI, causes StackOverflowError (REPL compiler bug).")
}
val pc = new PresentationCompiler(Nil)
pc.addScripts(cz)
val c = complete(pc) _
val code1 = List(newInst, newLine, "test.").mkString
val suggestions: Set[String] = c(code1, code1.size)._2.map {case Match(s, _) => s }
println(suggestions.map(s=> s""""${s}""""))
val r = suggestions must containAllOf(Seq(
"+(other: String)",
"clone",
"hashCode",
"asInstanceOf",
"getClass",
"isInstanceOf",
"implicitMethod(a: Int)"
))
pc.stop()
r
}
}
}
|
antonkulaga/spark-notebook
|
test/notebook/PresentationCompilerTests.scala
|
Scala
|
apache-2.0
| 3,438 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hortonworks.spark.sql.hive.llap
import java.sql.Date
import java.sql.Timestamp
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
import org.scalatest.FunSuite
class TestFilterPushdown extends FunSuite {
private val employeeSchema = StructType(Seq(
StructField("employee_id", IntegerType, nullable = true),
StructField("full_name", StringType, nullable = true),
StructField("first_name", StringType, nullable = true),
StructField("last_name", StringType, nullable = true),
StructField("position_id", IntegerType, nullable = true),
StructField("position_title", StringType, nullable = true),
StructField("store_id", IntegerType, nullable = true),
StructField("department_id", IntegerType, nullable = true),
StructField("birth_date", DateType, nullable = true),
StructField("hire_date", TimestampType, nullable = true),
StructField("end_date", TimestampType, nullable = true),
StructField("salary", DecimalType(10, 4), nullable = true),
StructField("supervisor_id", IntegerType, nullable = true),
StructField("education_level", StringType, nullable = true),
StructField("marital_status", StringType, nullable = true),
StructField("gender", StringType, nullable = true),
StructField("management_role", StringType, nullable = true)))
test("where") {
var expr = FilterPushdown.buildWhereClause(employeeSchema, Nil)
assert("" === expr)
expr = FilterPushdown.buildWhereClause(employeeSchema, List())
assert("" === expr)
expr = FilterPushdown.buildWhereClause(
employeeSchema,
List(EqualTo("employee_id", 88)))
assert("WHERE employee_id = 88" === expr)
expr = FilterPushdown.buildWhereClause(
employeeSchema,
List(EqualTo("employee_id", 88), new EqualTo("first_name", "Mike")))
assert("WHERE employee_id = 88 AND first_name = 'Mike'" === expr)
}
test("String escapes") {
checkFilter(employeeSchema,
EqualTo("first_name", "Mike's"),
"first_name = 'Mike\\\\'s'")
}
test("equalTo") {
checkFilter(employeeSchema,
EqualTo("employee_id", 88),
"employee_id = 88")
checkFilter(employeeSchema,
EqualTo("first_name", "Mike"),
"first_name = 'Mike'")
checkFilter(employeeSchema,
EqualTo("hire_date", Timestamp.valueOf("2001-02-03 04:05:06.123")),
"hire_date = TIMESTAMP '2001-02-03 04:05:06.123'")
checkFilter(employeeSchema,
EqualTo("birth_date", Date.valueOf("1961-08-26")),
"birth_date = DATE '1961-08-26'")
}
test("gt") {
checkFilter(employeeSchema,
GreaterThan("employee_id", 88),
"employee_id > 88")
}
test("gte") {
checkFilter(employeeSchema,
GreaterThanOrEqual("employee_id", 88),
"employee_id >= 88")
}
test("lt") {
checkFilter(employeeSchema,
LessThan("employee_id", 88),
"employee_id < 88")
}
test("lte") {
checkFilter(employeeSchema,
LessThanOrEqual("employee_id", 88),
"employee_id <= 88")
}
test("in") {
checkFilter(employeeSchema,
In("employee_id", Array(88, 89, 90)),
"employee_id IN (88,89,90)")
}
test("in - empty values are not allowed") {
checkFilter(employeeSchema,
In("employee_id", Array.empty),
"CASE WHEN employee_id IS NULL THEN NULL ELSE FALSE END")
}
test("string starts with") {
checkFilter(employeeSchema,
StringStartsWith("management_role", "val"),
"management_role LIKE 'val%'")
}
test("string ends with") {
checkFilter(employeeSchema,
StringEndsWith("management_role", "val"),
"management_role LIKE '%val'")
}
test("string contains") {
checkFilter(employeeSchema,
StringContains("management_role", "val"),
"management_role LIKE '%val%'")
}
test("is null") {
checkFilter(employeeSchema,
IsNull("employee_id"),
"employee_id IS NULL")
}
test("is not null") {
checkFilter(employeeSchema,
IsNotNull("employee_id"),
"employee_id IS NOT NULL")
}
test("not") {
checkFilter(employeeSchema,
Not(IsNotNull("employee_id")),
"NOT (employee_id IS NOT NULL)")
}
test("and") {
checkFilter(employeeSchema,
And(IsNotNull("employee_id"), LessThanOrEqual("employee_id", 88)),
"(employee_id IS NOT NULL) AND (employee_id <= 88)")
}
test("or") {
checkFilter(employeeSchema,
Or(IsNotNull("employee_id"), LessThanOrEqual("employee_id", 88)),
"(employee_id IS NOT NULL) OR (employee_id <= 88)")
}
test("nested logical") {
checkFilter(employeeSchema,
And(
Not(In("employee_id", Array(88, 89, 90))),
Or(IsNotNull("employee_id"), LessThanOrEqual("employee_id", 88))),
"(NOT (employee_id IN (88,89,90))) AND ((employee_id IS NOT NULL) OR (employee_id <= 88))")
}
private def checkFilter(schema: StructType, filter: Filter, expected: String) = {
val expr = FilterPushdown.buildFilterExpression(schema, filter)
expected match {
case null => assert(expr.isEmpty)
case _ => assert(expected === expr.get)
}
}
}
|
hortonworks-spark/spark-llap
|
src/test/scala/com/hortonworks/spark/sql/hive/llap/TestFilterPushdown.scala
|
Scala
|
apache-2.0
| 5,914 |
/**
* Copyright (c) 2013, The National Archives <[email protected]>
* http://www.nationalarchives.gov.uk
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package uk.gov.nationalarchives.dri.preingest.loader.unit.disk
import uk.gov.nationalarchives.dri.preingest.loader.unit._
import uk.gov.nationalarchives.dri.preingest.loader.store.DataStore
import scalax.file.Path
import uk.gov.nationalarchives.dri.preingest.loader.certificate.CertificateDetail
import uk.gov.nationalarchives.dri.preingest.loader.unit.DRIUnit.{OrphanedFileName, PartName}
import akka.actor.{Props, ActorRef}
import scala.util.control.Breaks._
import grizzled.slf4j.Logger
import uk.gov.nationalarchives.dri.preingest.loader.unit.common.MediaUnitActor
import uk.gov.nationalarchives.dri.preingest.loader.unit.TargetedPart
import scala.Some
import uk.gov.nationalarchives.dri.preingest.loader.unit.UnitError
class TrueCryptedPartitionUnitActor(var unit: TrueCryptedPartitionUnit) extends MediaUnitActor[TrueCryptedPartitionUnit] with EncryptedDRIUnitActor[TrueCryptedPartitionUnit] { //TODO consider subclassing PhysicalUnit
//TODO stub
def fixityCheck(username: String, part: TargetedPart, passphrase: Option[String], unitManager: Option[ActorRef]) {
}
def copyData(username: String, parts: Seq[TargetedPart], passphrase: Option[String], unitManager: Option[ActorRef]): Unit = copyData(username, parts, None, passphrase, unitManager)
def copyData(username: String, parts: Seq[TargetedPart], certificate: CertificateDetail, passphrase: Option[String], unitManager: Option[ActorRef]): Unit = {
DataStore.withTemporaryFile(Option(certificate)) {
cert =>
copyData(username, parts, cert, passphrase, unitManager)
}
}
def updateDecryptDetail(username: String, passphrase: String) = ??? //updateDecryptDetail(username, , None, passphrase)
def updateDecryptDetail(username: String, listener: ActorRef, certificate: CertificateDetail, passphrase: String) {
val retCode = DataStore.withTemporaryFile(Option(certificate)) {
cert =>
updateDecryptDetail(username, listener, cert, passphrase)
}
if (!retCode)
listener ! UnitError(unit, "Unable to decrypt data for unit ")
}
private def updateDecryptDetail(username: String, listener: ActorRef, certificate: Option[Path], passphrase: String) : Boolean = {
TrueCryptedPartition.getVolumeLabel(settings, unit.src, certificate, passphrase).map {
volumeLabel =>
//extract parts and orphaned files
tempMountPoint(username, unit.src) match {
case Left(ioe) =>
listener ! UnitError(unit, "Unable to decrypt data for unit: " + unit.uid)
error(s"Unable to update decrypted detail for unit: ${unit.uid}", ioe)
false
case Right(tempMountPoint) =>
val (dirs, files) = TrueCryptedPartition.listTopLevel(settings, unit.src, tempMountPoint, certificate, passphrase)(_.partition(_.isDirectory))
//update the unit
this.unit = this.unit.copy(partition = this.unit.partition.copy(partitionLabel = Option(volumeLabel)), parts = Option(dirs.map(_.name)), orphanedFiles = Option(files.map(_.name)))
true
}
}.getOrElse(false)
}
private def copyData(username: String, parts: Seq[TargetedPart], certificate: Option[Path], passphrase: Option[String], unitManager: Option[ActorRef]) {
tempMountPoint(username, unit.partition.deviceFile) match {
case Left(ioe) =>
error(s"Unable to copy data for unit: ${unit.uid}", ioe)
unitManager match {
case Some(sender) => sender ! UnitError(unit, "Unable to copy data for unit:" + ioe.getMessage)
case None =>
}
case Right(mountPoint) =>
TrueCrypt.withVolume(settings, unit.partition.deviceFile, certificate, passphrase.get, mountPoint) {
copyFiles( parts, mountPoint, unitManager)
}
}
}
}
// See http://doc.akka.io/docs/akka/snapshot/scala/actors.html : Recommended Practices
object TrueCryptedPartitionUnitActor {
def props(unit: TrueCryptedPartitionUnit): Props = Props(new TrueCryptedPartitionUnitActor(unit))
}
|
digital-preservation/dali
|
src/main/scala/uk/gov/tna/dri/preingest/loader/unit/disk/TruecryptedPartitionUnitActor.scala
|
Scala
|
mpl-2.0
| 4,361 |
class A {
class B(val x: Int)
def x = 1
def moo() {
new B(/* line: 2 */x = 1)
}
}
|
ilinum/intellij-scala
|
testdata/resolve2/bug3/ConstructorNamedParameters.scala
|
Scala
|
apache-2.0
| 95 |
package is.hail
import java.io._
import java.lang.reflect.Method
import java.net.{URI, URLClassLoader}
import java.security.SecureRandom
import java.text.SimpleDateFormat
import java.util.{Base64, Date}
import java.util.zip.{Deflater, Inflater}
import is.hail.annotations.ExtendedOrdering
import is.hail.check.Gen
import org.apache.commons.io.output.TeeOutputStream
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.fs.PathIOException
import org.apache.hadoop.mapred.FileSplit
import org.apache.hadoop.mapreduce.lib.input.{FileSplit => NewFileSplit}
import org.apache.log4j.Level
import org.apache.spark.{Partition, TaskContext}
import org.json4s.JsonAST.{JArray, JString}
import org.json4s.jackson.Serialization
import org.json4s.reflect.TypeInfo
import org.json4s.{Extraction, Formats, JObject, NoTypeHints, Serializer}
import scala.collection.generic.CanBuildFrom
import scala.collection.{GenTraversableOnce, TraversableOnce, mutable}
import scala.language.{higherKinds, implicitConversions}
import scala.reflect.ClassTag
import is.hail.io.fs.FS
package utils {
trait Truncatable {
def truncate: String
def strings: (String, String)
}
sealed trait FlattenOrNull[C[_] >: Null] {
def apply[T >: Null](b: mutable.Builder[T, C[T]], it: Iterable[Iterable[T]]): C[T] = {
for (elt <- it) {
if (elt == null)
return null
b ++= elt
}
b.result()
}
}
sealed trait AnyFailAllFail[C[_]] {
def apply[T](ts: TraversableOnce[Option[T]])(implicit cbf: CanBuildFrom[Nothing, T, C[T]]): Option[C[T]] = {
val b = cbf()
for (t <- ts) {
if (t.isEmpty)
return None
else
b += t.get
}
Some(b.result())
}
}
sealed trait MapAccumulate[C[_], U] {
def apply[T, S](a: Iterable[T], z: S)(f: (T, S) => (U, S))
(implicit uct: ClassTag[U], cbf: CanBuildFrom[Nothing, U, C[U]]): C[U] = {
val b = cbf()
var acc = z
for ((x, i) <- a.zipWithIndex) {
val (y, newAcc) = f(x, acc)
b += y
acc = newAcc
}
b.result()
}
}
}
package object utils extends Logging
with richUtils.Implicits
with NumericPairImplicits
with utils.NumericImplicits
with Py4jUtils
with ErrorHandling {
def getStderrAndLogOutputStream[T](implicit tct: ClassTag[T]): OutputStream =
new TeeOutputStream(new LoggerOutputStream(log, Level.ERROR), System.err)
def format(s: String, substitutions: Any*): String = {
substitutions.zipWithIndex.foldLeft(s) { case (str, (value, i)) =>
str.replace(s"@${ i + 1 }", value.toString)
}
}
def checkGzippedFile(fs: FS,
input: String,
forceGZ: Boolean,
gzAsBGZ: Boolean,
maxSizeMB: Int = 128) {
if (!forceGZ && !gzAsBGZ)
fatal(
s"""Cannot load file '$input'
| .gz cannot be loaded in parallel. Is the file actually *block* gzipped?
| If the file is actually block gzipped (even though its extension is .gz),
| use the 'force_bgz' argument to treat all .gz file extensions as .bgz.
| If you are sure that you want to load a non-block-gzipped file serially
| on one core, use the 'force' argument.""".stripMargin)
else if (!gzAsBGZ) {
val fileSize = fs.getFileSize(input)
if (fileSize > 1024 * 1024 * maxSizeMB)
warn(
s"""file '$input' is ${ readableBytes(fileSize) }
| It will be loaded serially (on one core) due to usage of the 'force' argument.
| If it is actually block-gzipped, either rename to .bgz or use the 'force_bgz'
| argument.""".stripMargin)
}
}
def plural(n: Long, sing: String, plur: String = null): String =
if (n == 1)
sing
else if (plur == null)
sing + "s"
else
plur
val noOp: () => Unit = () => ()
def square[T](d: T)(implicit ev: T => scala.math.Numeric[T]#Ops): T = d * d
def triangle(n: Int): Int = (n * (n + 1)) / 2
def treeAggDepth(nPartitions: Int, branchingFactor: Int): Int = {
require(nPartitions >= 0)
require(branchingFactor > 0)
if (nPartitions == 0)
return 1
math.ceil(math.log(nPartitions) / math.log(branchingFactor)).toInt
}
def simpleAssert(p: Boolean) {
if (!p) throw new AssertionError
}
def optionCheckInRangeInclusive[A](low: A, high: A)(name: String, a: A)(implicit ord: Ordering[A]): Unit =
if (ord.lt(a, low) || ord.gt(a, high)) {
fatal(s"$name cannot lie outside [$low, $high]: $a")
}
def printTime[T](block: => T) = {
val timed = time(block)
println("time: " + formatTime(timed._2))
timed._1
}
def time[A](f: => A): (A, Long) = {
val t0 = System.nanoTime()
val result = f
val t1 = System.nanoTime()
(result, t1 - t0)
}
final val msPerMinute = 60 * 1e3
final val msPerHour = 60 * msPerMinute
final val msPerDay = 24 * msPerHour
def formatTime(dt: Long): String = {
val tMilliseconds = dt / 1e6
if (tMilliseconds < 1000)
("%.3f" + "ms").format(tMilliseconds)
else if (tMilliseconds < msPerMinute)
("%.3f" + "s").format(tMilliseconds / 1e3)
else if (tMilliseconds < msPerHour) {
val tMins = (tMilliseconds / msPerMinute).toInt
val tSec = (tMilliseconds % msPerMinute) / 1e3
("%d" + "m" + "%.1f" + "s").format(tMins, tSec)
}
else {
val tHrs = (tMilliseconds / msPerHour).toInt
val tMins = ((tMilliseconds % msPerHour) / msPerMinute).toInt
val tSec = (tMilliseconds % msPerMinute) / 1e3
("%d" + "h" + "%d" + "m" + "%.1f" + "s").format(tHrs, tMins, tSec)
}
}
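// Editor's note (illustrative, not in the original source): formatTime takes nanoseconds, e.g.
//   formatTime(1500000L)       // "1.500ms"
//   formatTime(125000000000L)  // "2m5.0s"  (125 seconds)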
def space[A](f: => A): (A, Long) = {
val rt = Runtime.getRuntime
System.gc()
System.gc()
val before = rt.totalMemory() - rt.freeMemory()
val r = f
System.gc()
val after = rt.totalMemory() - rt.freeMemory()
(r, after - before)
}
def printSpace[A](f: => A): A = {
val (r, ds) = space(f)
println("space: " + formatSpace(ds))
r
}
def formatSpace(ds: Long, precision: Int = 2): String = {
val absds = ds.abs
val kib = 1024L
val mib = kib * 1024
val gib = mib * 1024
val tib = gib * 1024
val (div: Long, suffix: String) = if (absds < kib)
(1L, "B")
else if (absds < mib)
(kib, "KiB")
else if (absds < gib)
(mib, "MiB")
else if (absds < tib)
(gib, "GiB")
else
(tib, "TiB")
val num = formatDouble(absds.toDouble / div.toDouble, precision)
s"$num $suffix"
}
def someIf[T](p: Boolean, x: => T): Option[T] =
if (p)
Some(x)
else
None
def nullIfNot(p: Boolean, x: Any): Any = {
if (p)
x
else
null
}
def divOption(num: Double, denom: Double): Option[Double] =
someIf(denom != 0, num / denom)
def divNull(num: Double, denom: Double): java.lang.Double =
if (denom == 0)
null
else
num / denom
val defaultTolerance = 1e-6
def D_epsilon(a: Double, b: Double, tolerance: Double = defaultTolerance): Double =
math.max(java.lang.Double.MIN_NORMAL, tolerance * math.max(math.abs(a), math.abs(b)))
def D_==(a: Double, b: Double, tolerance: Double = defaultTolerance): Boolean = {
a == b || math.abs(a - b) <= D_epsilon(a, b, tolerance)
}
def D_!=(a: Double, b: Double, tolerance: Double = defaultTolerance): Boolean = {
!(a == b) && math.abs(a - b) > D_epsilon(a, b, tolerance)
}
def D_<(a: Double, b: Double, tolerance: Double = defaultTolerance): Boolean =
!(a == b) && a - b < -D_epsilon(a, b, tolerance)
def D_<=(a: Double, b: Double, tolerance: Double = defaultTolerance): Boolean =
(a == b) || a - b <= D_epsilon(a, b, tolerance)
def D_>(a: Double, b: Double, tolerance: Double = defaultTolerance): Boolean =
!(a == b) && a - b > D_epsilon(a, b, tolerance)
def D_>=(a: Double, b: Double, tolerance: Double = defaultTolerance): Boolean =
(a == b) || a - b >= -D_epsilon(a, b, tolerance)
def D0_==(x: Double, y: Double, tolerance: Double = defaultTolerance): Boolean =
if (x.isNaN)
y.isNaN
else if (x.isPosInfinity)
y.isPosInfinity
else if (x.isNegInfinity)
y.isNegInfinity
else
D_==(x, y, tolerance)
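// Editor's note (illustrative, not in the original source): with the default tolerance of 1e-6,
//   D_==(1.0, 1.0000000001)        // true  (difference well below the scaled epsilon)
//   D_==(1.0, 1.001)               // false
//   D0_==(Double.NaN, Double.NaN)  // true, unlike plain ==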
def flushDouble(a: Double): Double =
if (math.abs(a) < java.lang.Double.MIN_NORMAL) 0.0 else a
def genBase: Gen[Char] = Gen.oneOf('A', 'C', 'T', 'G')
def getPartNumber(fname: String): Int = {
val partRegex = """.*/?part-(\d+).*""".r
fname match {
case partRegex(i) => i.toInt
case _ => throw new PathIOException(s"invalid partition file '$fname'")
}
}
// ignore size; atomic, like String
def genDNAString: Gen[String] = Gen.stringOf(genBase)
.resize(12)
.filter(s => !s.isEmpty)
def prettyIdentifier(str: String): String = {
if (str.matches("""[_a-zA-Z]\w*"""))
str
else
s"`${ StringEscapeUtils.escapeString(str, backticked = true) }`"
}
def formatDouble(d: Double, precision: Int): String = d.formatted(s"%.${ precision }f")
def uriPath(uri: String): String = new URI(uri).getPath
// NB: can't use Nothing here because it is not a super type of Null
private object flattenOrNullInstance extends FlattenOrNull[Array]
def flattenOrNull[C[_] >: Null] =
flattenOrNullInstance.asInstanceOf[FlattenOrNull[C]]
private object anyFailAllFailInstance extends AnyFailAllFail[Nothing]
def anyFailAllFail[C[_]]: AnyFailAllFail[C] =
anyFailAllFailInstance.asInstanceOf[AnyFailAllFail[C]]
def uninitialized[T]: T = null.asInstanceOf[T]
private object mapAccumulateInstance extends MapAccumulate[Nothing, Nothing]
def mapAccumulate[C[_], U] =
mapAccumulateInstance.asInstanceOf[MapAccumulate[C, U]]
/**
* An abstraction for building an {@code Array} of known size. Guarantees a left-to-right traversal
*
* @param xs the thing to iterate over
* @param size the size of array to allocate
* @param key given the source value and its source index, yield the target index
* @param combine given the target value, the target index, the source value, and the source index, compute the new target value
* @tparam A
* @tparam B
*/
def coalesce[A, B: ClassTag](xs: GenTraversableOnce[A])(size: Int, key: (A, Int) => Int, z: B)(combine: (B, A) => B): Array[B] = {
val a = Array.fill(size)(z)
for ((x, idx) <- xs.toIterator.zipWithIndex) {
val k = key(x, idx)
a(k) = combine(a(k), x)
}
a
}
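// Editor's note: a worked example (not in the original source) of coalesce, bucketing by parity:
//   coalesce(Seq(1, 2, 3, 4))(2, (x, _) => x % 2, 0)(_ + _)
//   // yields Array(6, 4): evens (2 + 4) land in slot 0, odds (1 + 3) in slot 1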
def mapSameElements[K, V](l: Map[K, V], r: Map[K, V], valueEq: (V, V) => Boolean): Boolean = {
def entryMismatchMessage(failures: TraversableOnce[(K, V, V)]): String = {
require(failures.nonEmpty)
val newline = System.lineSeparator()
val sb = new StringBuilder
sb ++= "The maps do not have the same entries:" + newline
for (failure <- failures) {
sb ++= s" At key ${ failure._1 }, the left map has ${ failure._2 } and the right map has ${ failure._3 }" + newline
}
sb ++= s" The left map is: $l" + newline
sb ++= s" The right map is: $r" + newline
sb.result()
}
if (l.keySet != r.keySet) {
println(
s"""The maps do not have the same keys.
| These keys are unique to the left-hand map: ${ l.keySet -- r.keySet }
| These keys are unique to the right-hand map: ${ r.keySet -- l.keySet }
| The left map is: $l
| The right map is: $r
""".stripMargin)
false
} else {
val fs = Array.newBuilder[(K, V, V)]
for ((k, lv) <- l) {
val rv = r(k)
if (!valueEq(lv, rv))
fs += ((k, lv, rv))
}
val failures = fs.result()
if (!failures.isEmpty) {
println(entryMismatchMessage(failures))
false
} else {
true
}
}
}
def getIteratorSize[T](iterator: Iterator[T]): Long = {
var count = 0L
while (iterator.hasNext) {
count += 1L
iterator.next()
}
count
}
def getIteratorSizeWithMaxN[T](max: Long)(iterator: Iterator[T]): Long = {
var count = 0L
while (iterator.hasNext && count < max) {
count += 1L
iterator.next()
}
count
}
def lookupMethod(c: Class[_], method: String): Method = {
try {
c.getDeclaredMethod(method)
} catch {
case _: Exception =>
assert(c != classOf[java.lang.Object])
lookupMethod(c.getSuperclass, method)
}
}
def invokeMethod(obj: AnyRef, method: String, args: AnyRef*): AnyRef = {
val m = lookupMethod(obj.getClass, method)
m.invoke(obj, args: _*)
}
/*
* Use reflection to get the path of a partition coming from a Parquet read. This requires accessing Spark
* internal interfaces. It works with Spark 1 and 2 and doesn't depend on the location of the Parquet
* package (parquet vs org.apache.parquet) which can vary between distributions.
*/
def partitionPath(p: Partition): String = {
p.getClass.getCanonicalName match {
case "org.apache.spark.rdd.SqlNewHadoopPartition" =>
val split = invokeMethod(invokeMethod(p, "serializableHadoopSplit"), "value").asInstanceOf[NewFileSplit]
split.getPath.getName
case "org.apache.spark.sql.execution.datasources.FilePartition" =>
val files = invokeMethod(p, "files").asInstanceOf[Seq[_ <: AnyRef]]
assert(files.length == 1)
invokeMethod(files(0), "filePath").asInstanceOf[String]
case "org.apache.spark.rdd.HadoopPartition" =>
val split = invokeMethod(invokeMethod(p, "inputSplit"), "value").asInstanceOf[FileSplit]
split.getPath.getName
}
}
def dictionaryOrdering[T](ords: Ordering[T]*): Ordering[T] = {
new Ordering[T] {
def compare(x: T, y: T): Int = {
var i = 0
while (i < ords.size) {
val v = ords(i).compare(x, y)
if (v != 0)
return v
i += 1
}
return 0
}
}
}
val defaultJSONFormats: Formats = Serialization.formats(NoTypeHints) + GenericIndexedSeqSerializer
def box(i: Int): java.lang.Integer = i
def box(l: Long): java.lang.Long = l
def box(f: Float): java.lang.Float = f
def box(d: Double): java.lang.Double = d
def box(b: Boolean): java.lang.Boolean = b
def intArraySum(a: Array[Int]): Int = {
var s = 0
var i = 0
while (i < a.length) {
s += a(i)
i += 1
}
s
}
def decompress(input: Array[Byte], size: Int): Array[Byte] = {
val expansion = new Array[Byte](size)
val inflater = new Inflater
inflater.setInput(input)
var off = 0
while (off < expansion.length) {
off += inflater.inflate(expansion, off, expansion.length - off)
}
expansion
}
def loadFromResource[T](file: String)(reader: (InputStream) => T): T = {
val resourceStream = Thread.currentThread().getContextClassLoader.getResourceAsStream(file)
assert(resourceStream != null, s"Error while locating file '$file'")
try
reader(resourceStream)
finally
resourceStream.close()
}
def roundWithConstantSum(a: Array[Double]): Array[Int] = {
val withFloors = a.zipWithIndex.map { case (d, i) => (i, d, math.floor(d)) }
val totalFractional = (withFloors.map { case (i, orig, floor) => orig - floor }.sum + 0.5).toInt
withFloors
.sortBy { case (_, orig, floor) => floor - orig }
.zipWithIndex
.map { case ((i, orig, floor), iSort) =>
if (iSort < totalFractional)
(i, math.ceil(orig))
else
(i, math.floor(orig))
}.sortBy(_._1).map(_._2.toInt)
}
def uniqueMinIndex(a: Array[Int]): java.lang.Integer = {
def f(i: Int, m: Int, mi: Int, count: Int): java.lang.Integer = {
if (i == a.length) {
assert(count >= 1)
if (count == 1)
mi
else
null
} else if (a(i) < m)
f(i + 1, a(i), i, 1)
else if (a(i) == m)
f(i + 1, m, mi, count + 1)
else
f(i + 1, m, mi, count)
}
if (a.isEmpty)
null
else
f(1, a(0), 0, 1)
}
def uniqueMaxIndex(a: Array[Int]): java.lang.Integer = {
def f(i: Int, m: Int, mi: Int, count: Int): java.lang.Integer = {
if (i == a.length) {
assert(count >= 1)
if (count == 1)
mi
else
null
} else if (a(i) > m)
f(i + 1, a(i), i, 1)
else if (a(i) == m)
f(i + 1, m, mi, count + 1)
else
f(i + 1, m, mi, count)
}
if (a.isEmpty)
null
else
f(1, a(0), 0, 1)
}
def digitsNeeded(i: Int): Int = {
assert(i >= 0)
if (i < 10)
1
else
1 + digitsNeeded(i / 10)
}
def partFile(numDigits: Int, i: Int): String = {
val is = i.toString
assert(is.length <= numDigits)
"part-" + StringUtils.leftPad(is, numDigits, "0")
}
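// Editor's note (illustrative, not in the original source):
//   digitsNeeded(999)  // 3
//   partFile(4, 7)     // "part-0007"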
def partSuffix(ctx: TaskContext): String = {
val rng = new java.security.SecureRandom()
val fileUUID = new java.util.UUID(rng.nextLong(), rng.nextLong())
s"${ ctx.stageId() }-${ ctx.partitionId() }-${ ctx.attemptNumber() }-$fileUUID"
}
def partFile(d: Int, i: Int, ctx: TaskContext): String = s"${ partFile(d, i) }-${ partSuffix(ctx) }"
def mangle(strs: Array[String], formatter: Int => String = "_%d".format(_)): (Array[String], Array[(String, String)]) = {
val b = new ArrayBuilder[String]
val uniques = new mutable.HashSet[String]()
val mapping = new ArrayBuilder[(String, String)]
strs.foreach { s =>
var smod = s
var i = 0
while (uniques.contains(smod)) {
i += 1
smod = s + formatter(i)
}
if (smod != s)
mapping += s -> smod
uniques += smod
b += smod
}
b.result() -> mapping.result()
}
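// Editor's note: a worked example (not in the original source) of mangle's deduplication:
//   mangle(Array("a", "b", "a"))
//   // yields (Array("a", "b", "a_1"), Array(("a", "a_1")))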
def lift[T, S](pf: PartialFunction[T, S]): (T) => Option[S] = pf.lift
def flatLift[T, S](pf: PartialFunction[T, Option[S]]): (T) => Option[S] = pf.flatLift
def optMatch[T, S](a: T)(pf: PartialFunction[T, S]): Option[S] = lift(pf)(a)
def using[R <: AutoCloseable, T](r: R)(consume: (R) => T): T = {
var caught = false
try {
consume(r)
} catch {
case original: Exception =>
caught = true
try {
r.close()
throw original
} catch {
case duringClose: Exception =>
if (original == duringClose) {
log.info(s"""The exact same exception object, ${original}, was thrown by both
|the consumer and the close method. I will throw the original.""".stripMargin)
throw original
} else {
duringClose.addSuppressed(original)
throw duringClose
}
}
} finally {
if (!caught) {
r.close()
}
}
}
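// Editor's note (illustrative, not in the original source): `using` closes the resource on both
// the success and failure paths, e.g. (with a hypothetical file name):
//   using(new java.io.FileInputStream("data.bin")) { in => in.read() }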
def singletonElement[T](it: Iterator[T]): T = {
val x = it.next()
assert(!it.hasNext)
x
}
// return partition of the ith item
def itemPartition(i: Int, n: Int, k: Int): Int = {
assert(n >= 0)
assert(k > 0)
assert(i >= 0 && i < n)
val minItemsPerPartition = n / k
val r = n % k
if (r == 0)
i / minItemsPerPartition
else {
val maxItemsPerPartition = minItemsPerPartition + 1
val crossover = maxItemsPerPartition * r
if (i < crossover)
i / maxItemsPerPartition
else
r + ((i - crossover) / minItemsPerPartition)
}
}
def partition(n: Int, k: Int): Array[Int] = {
if (k == 0) {
assert(n == 0)
return Array.empty[Int]
}
assert(n >= 0)
assert(k > 0)
val parts = Array.tabulate(k)(i => (n - i + k - 1) / k)
assert(parts.sum == n)
assert(parts.max - parts.min <= 1)
parts
}
def partition(n: Long, k: Int): Array[Long] = {
if (k == 0) {
assert(n == 0)
return Array.empty[Long]
}
assert(n >= 0)
assert(k > 0)
val parts = Array.tabulate(k)(i => (n - i + k - 1) / k)
assert(parts.sum == n)
assert(parts.max - parts.min <= 1)
parts
}
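// Editor's note: worked examples (not in the original source) of the partitioning helpers:
//   partition(10, 3)        // Array(4, 3, 3)
//   itemPartition(5, 10, 3) // 1  (items 0-3 -> part 0, 4-6 -> part 1, 7-9 -> part 2)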
def matchErrorToNone[T, U](f: (T) => U): (T) => Option[U] = (x: T) => {
try {
Some(f(x))
} catch {
case _: MatchError => None
}
}
def charRegex(c: Char): String = {
// See: https://docs.oracle.com/javase/tutorial/essential/regex/literals.html
val metacharacters = "<([{\\^-=$!|]})?*+.>"
val s = c.toString
if (metacharacters.contains(c))
"\\\\" + s
else
s
}
def ordMax[T](left: T, right: T, ord: ExtendedOrdering): T = {
if (ord.gt(left, right))
left
else
right
}
def ordMin[T](left: T, right: T, ord: ExtendedOrdering): T = {
if (ord.lt(left, right))
left
else
right
}
def makeJavaMap[K, V](x: TraversableOnce[(K, V)]): java.util.HashMap[K, V] = {
val m = new java.util.HashMap[K, V]
x.foreach { case (k, v) => m.put(k, v) }
m
}
def makeJavaSet[K](x: TraversableOnce[K]): java.util.HashSet[K] = {
val m = new java.util.HashSet[K]
x.foreach(m.add)
m
}
def toMapFast[T, K, V](
ts: TraversableOnce[T]
)(key: T => K,
value: T => V
): collection.Map[K, V] = {
val it = ts.toIterator
val m = mutable.Map[K, V]()
while (it.hasNext) {
val t = it.next
m.put(key(t), value(t))
}
m
}
def toMapIfUnique[K, K2, V](
kvs: Traversable[(K, V)]
)(keyBy: K => K2
): Either[Map[K2, Traversable[K]], Map[K2, V]] = {
val grouped = kvs.groupBy(x => keyBy(x._1))
val dupes = grouped.filter { case (k, m) => m.size != 1 }
if (dupes.nonEmpty) {
Left(dupes.map { case (k, m) => k -> m.map(_._1) })
} else {
Right(grouped
.map { case (k, m) => k -> m.map(_._2).head }
.toMap)
}
}
def dumpClassLoader(cl: ClassLoader) {
System.err.println(s"ClassLoader ${ cl.getClass.getCanonicalName }:")
cl match {
case cl: URLClassLoader =>
System.err.println(s" ${ cl.getURLs.mkString(" ") }")
case _ =>
System.err.println(" non-URLClassLoader")
}
val parent = cl.getParent
if (parent != null)
dumpClassLoader(parent)
}
def writeNativeFileReadMe(fs: FS, path: String): Unit = {
val dateFormat = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss")
using(new OutputStreamWriter(fs.create(path + "/README.txt"))) { out =>
out.write(
s"""This folder comprises a Hail (www.hail.is) native Table or MatrixTable.
| Written with version ${ HailContext.get.version }
| Created at ${ dateFormat.format(new Date()) }""".stripMargin)
}
}
def compress(bb: ArrayBuilder[Byte], input: Array[Byte]): Int = {
val compressor = new Deflater()
compressor.setInput(input)
compressor.finish()
val buffer = new Array[Byte](1024)
var compressedLength = 0
while (!compressor.finished()) {
val nCompressedBytes = compressor.deflate(buffer)
bb ++= (buffer, nCompressedBytes)
compressedLength += nCompressedBytes
}
compressedLength
}
def unwrappedApply[U, T](f: (U, T) => T): (U, Seq[T]) => T = if (f == null) null else { (s, ts) =>
f(s, ts(0))
}
def unwrappedApply[U, T](f: (U, T, T) => T): (U, Seq[T]) => T = if (f == null) null else { (s, ts) =>
val Seq(t1, t2) = ts
f(s, t1, t2)
}
def unwrappedApply[U, T](f: (U, T, T, T) => T): (U, Seq[T]) => T = if (f == null) null else { (s, ts) =>
val Seq(t1, t2, t3) = ts
f(s, t1, t2, t3)
}
def unwrappedApply[U, T](f: (U, T, T, T, T) => T): (U, Seq[T]) => T = if (f == null) null else { (s, ts) =>
val Seq(t1, t2, t3, t4) = ts
f(s, t1, t2, t3, t4)
}
def unwrappedApply[U, T](f: (U, T, T, T, T, T) => T): (U, Seq[T]) => T = if (f == null) null else { (s, ts) =>
val Seq(t1, t2, t3, t4, t5) = ts
f(s, t1, t2, t3, t4, t5)
}
def unwrappedApply[U, T](f: (U, T, T, T, T, T, T) => T): (U, Seq[T]) => T = if (f == null) null else { (s, ts) =>
val Seq(arg1, arg2, arg3, arg4, arg5, arg6) = ts
f(s, arg1, arg2, arg3, arg4, arg5, arg6)
}
def drainInputStreamToOutputStream(
is: InputStream,
os: OutputStream
): Unit = {
val buffer = new Array[Byte](1024)
var length = is.read(buffer)
while (length != -1) {
os.write(buffer, 0, length);
length = is.read(buffer)
}
}
def isJavaIdentifier(id: String): Boolean = {
if (!java.lang.Character.isJavaIdentifierStart(id.head))
return false
var i = 1
while (i < id.length) {
if (!java.lang.Character.isJavaIdentifierPart(id(i)))
return false
i += 1
}
true
}
def commonPrefix[T](left: IndexedSeq[T], right: IndexedSeq[T]): IndexedSeq[T] = {
var i = 0
while (i < left.length && i < right.length && left(i) == right(i))
i += 1
if (i == left.length)
left
else if (i == right.length)
right
else
left.take(i)
}
def decomposeWithName(v: Any, name: String)(implicit formats: Formats): JObject = {
val jo = Extraction.decompose(v).asInstanceOf[JObject]
jo.merge(JObject("name" -> JString(name)))
}
def makeVirtualOffset(fileOffset: Long, blockOffset: Int): Long = {
assert(fileOffset >= 0)
assert(blockOffset >= 0)
assert(blockOffset < 64 * 1024)
(fileOffset << 16) | blockOffset
}
def virtualOffsetBlockOffset(offset: Long): Int = {
(offset & 0xFFFF).toInt
}
def virtualOffsetCompressedOffset(offset: Long): Long = {
offset >> 16
}
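// Editor's note: a worked example (not in the original source) of the virtual-offset packing above:
//   makeVirtualOffset(3L, 100)             // (3L << 16) | 100 == 196708L
//   virtualOffsetCompressedOffset(196708L) // 3
//   virtualOffsetBlockOffset(196708L)      // 100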
def tokenUrlSafe(n: Int): String = {
val bytes = new Array[Byte](32)
val random = new SecureRandom()
random.nextBytes(bytes)
Base64.getUrlEncoder.encodeToString(bytes)
}
}
// FIXME: probably resolved in 3.6 https://github.com/json4s/json4s/commit/fc96a92e1aa3e9e3f97e2e91f94907fdfff6010d
object GenericIndexedSeqSerializer extends Serializer[IndexedSeq[_]] {
val IndexedSeqClass = classOf[IndexedSeq[_]]
override def serialize(implicit format: Formats) = {
case seq: IndexedSeq[_] => JArray(seq.map(Extraction.decompose).toList)
}
override def deserialize(implicit format: Formats) = {
case (TypeInfo(IndexedSeqClass, parameterizedType), JArray(xs)) =>
val typeInfo = TypeInfo(parameterizedType
.map(_.getActualTypeArguments()(0))
.getOrElse(throw new RuntimeException("No type parameter info for type IndexedSeq"))
.asInstanceOf[Class[_]],
None)
xs.map(x => Extraction.extract(x, typeInfo)).toArray[Any]
}
}
|
danking/hail
|
hail/src/main/scala/is/hail/utils/package.scala
|
Scala
|
mit
| 26,460 |
// AORTA is copyright (C) 2012 Dustin Carlino, Mike Depinet, and Piyush
// Khandelwal of UT Austin
// License: GNU GPL v2
package utexas.aorta.map.make
import scala.collection.mutable
import utexas.aorta.map.{Edge, Vertex, Turn, Road}
import utexas.aorta.common.{Util, cfg, TurnID, RoadID}
class Pass3_Part2(graph: PreGraph3) {
val roads_per_vert = new mutable.HashMap[Vertex, mutable.Set[Road]] with mutable.MultiMap[Vertex, Road]
var turn_cnt = -1
def run() {
Util.log("Connecting the dots...")
for (r <- graph.roads) {
roads_per_vert.addBinding(r.v1, r)
roads_per_vert.addBinding(r.v2, r)
}
Util.log_push()
// TODO return the mapping in the future?
for (v <- graph.vertices) {
assert(roads_per_vert.contains(v))
connect_vertex(v, roads_per_vert(v))
}
Util.log_pop()
}
private def next_id(): Int = {
turn_cnt += 1
return turn_cnt
}
// fill out an intersection with turns
private def connect_vertex(v: Vertex, roads: mutable.Set[Road]) {
// TODO return the turns or something so we can more efficiently set them
// TODO cfg
val cross_thresshold = math.Pi / 10 // allow 18 degrees
def make_turn(pair: (Edge, Edge)) = new Turn(new TurnID(next_id), pair._1, pair._2)
// To account for one-ways, we actually want to reason about roads that are
// incoming to or outgoing from this vert.
// Sorting is for determinism.
val incoming_roads = roads.filter(_.incoming_lanes(v).nonEmpty).toList.sortBy(_.id.int)
val outgoing_roads = roads.filter(_.outgoing_lanes(v).nonEmpty).toList.sortBy(_.id.int)
// Only have to test one side
def bad_uturn(r1: Road, r2: Road) = graph.other_side.get(r1) match {
// Only allow this if this intersection only has these two roads
case Some(other) if other == r2 => !(incoming_roads.size == 1 && outgoing_roads.size == 1)
case _ => false
}
// this is a Cartesian product.
for (r1 <- incoming_roads; r2 <- outgoing_roads if r1 != r2 && !bad_uturn(r1, r2)) {
val from_edges = r1.incoming_lanes(v)
val to_edges = r2.outgoing_lanes(v)
// choose arbitrary representatives so we can make queries
val from_rep = from_edges.head
val to_rep = to_edges.head
// we want the angle to go from any 'from' edge to any 'to' edge
val from_angle = from_rep.last_road_line.angle
val to_angle = to_rep.first_road_line.angle
// smallest angle of rotation, from "Agony" on gamedev TODO cite
val angle_btwn = ((from_angle - to_angle + 3 * (math.Pi)) % (2 * math.Pi)) - math.Pi
if (r1.osm_id == r2.osm_id || math.abs(angle_btwn) <= cross_thresshold) {
// a crossing!
// essentially zip the from's to the to's, but handle merging:
// x -> x + n, make the 1 leftmost lead to the n leftmost
// x + n -> x, make the n rightmost lead to the 1 rightmost
// TODO these rules are hard to generalize. when should we have
// left/right-turn only lanes and stuff?
val lane_diff = to_edges.length - from_edges.length
if (lane_diff == 0) {
// exact 1:1 mapping
v.turns ++= from_edges.zip(to_edges).map(make_turn)
} else if (lane_diff < 0) {
// more to less. the rightmost will all have to merge.
// we have 'to_edges.length - 1' regular dsts.
val (mergers, regulars) = from_edges.splitAt(from_edges.length - (to_edges.length - 1))
Util.assert_eq(regulars.length, to_edges.length - 1)
v.turns ++= mergers.map(from => make_turn((from, to_edges.head)))
v.turns ++= regulars.zip(to_edges.tail).map(make_turn)
} else if (lane_diff > 0) {
// less to more. the leftmost gets to pick many destinations.
val lucky_src = from_edges.last
val regular_srcs = from_edges.dropRight(1)
val (regular_dsts, choices) = to_edges.splitAt(to_edges.size - lane_diff - 1)
Util.assert_eq(regular_srcs.size, regular_dsts.size)
v.turns ++= regular_srcs.zip(regular_dsts).map(make_turn)
v.turns ++= choices.map(to => make_turn((lucky_src, to)))
}
} else if (angle_btwn < 0) {
// no multiple turn lanes supported yet. it's just too hard to know when
// this is the case.
v.turns = make_turn((from_rep.leftmost_lane, to_rep.leftmost_lane)) :: v.turns
} else {
v.turns = make_turn((from_rep.rightmost_lane, to_rep.rightmost_lane)) :: v.turns
}
}
}
}
|
dabreegster/aorta
|
utexas/aorta/map/make/Pass3_Part2.scala
|
Scala
|
gpl-2.0
| 4,555 |
//############################################################################
// Bugs
//############################################################################
//############################################################################
// Bug 98
object Bug98Test {
object MyCase { def name = "mycase" }
def test(args: Array[String]): Unit = {
println(MyCase.name)
}
}
//############################################################################
// Bug 120
class Bug120A(x: Int) {
println("A")
}
trait Bug120B {
println("B")
}
class Bug120C(x: Int)
extends Bug120A(Bug120Test.print("one", 1))
with Bug120B {
println("C")
}
object Bug120Test {
def print[A](str: String, res: A): A = {
println(str); res
}
def test(args: Array[String]): Unit = {
val c = new Bug120C(1)
()
}
}
//############################################################################
// Bug 135
object Bug135Test {
import scala.collection.immutable.TreeMap
def test(args: Array[String]): Unit = {
val myMap:TreeMap[Int, String] = new TreeMap
val map1 = myMap + ((42, "The answer"))
println(map1.get(42))
}
}
//############################################################################
// Bug 142
abstract class Bug142Foo1 { class Inner; def foo: Inner; foo; }
abstract class Bug142Foo2 { class Inner; def foo: Inner = {Console.println("ok"); null};}
abstract class Bug142Foo3 { type Inner; def foo: Inner; foo; }
abstract class Bug142Foo4 { type Inner; def foo: Inner = {Console.println("ok"); null.asInstanceOf[Inner]}; }
trait Bug142Bar1 { type Inner; def foo: Inner = {Console.println("ok"); null.asInstanceOf[Inner]}; }
trait Bug142Bar2 { type Inner; def foo: Inner; foo; }
trait Bug142Bar3 { class Inner; def foo: Inner = {Console.println("ok"); null}; }
trait Bug142Bar4 { class Inner; def foo: Inner; foo; }
object Bug142Test1 extends Bug142Foo1 with Bug142Bar1 { def test(args: Array[String]): Unit = {} }
object Bug142Test2 extends Bug142Foo2 with Bug142Bar2 { def test(args: Array[String]): Unit = {} }
object Bug142Test3 extends Bug142Foo3 with Bug142Bar3 { def test(args: Array[String]): Unit = {} }
object Bug142Test4 extends Bug142Foo4 with Bug142Bar4 { def test(args: Array[String]): Unit = {} }
object Bug142Test5 extends Bug142Foo1 with Bug142Bar1 { def test(args: Array[String]): Unit = {} }
object Bug142Test6 extends Bug142Foo2 with Bug142Bar2 { def test(args: Array[String]): Unit = {} }
object Bug142Test7 extends Bug142Foo3 with Bug142Bar3 { def test(args: Array[String]): Unit = {} }
object Bug142Test8 extends Bug142Foo4 with Bug142Bar4 { def test(args: Array[String]): Unit = {} }
object Bug142Test {
def test(args:Array[String]): Unit = {
Bug142Test1;
Bug142Test2;
Bug142Test3;
Bug142Test4;
Bug142Test5;
Bug142Test6;
Bug142Test7;
Bug142Test8;
()
}
}
//############################################################################
// Bug 166
object Bug166Test {
import scala.collection.mutable.HashMap
def test(args: Array[String]): Unit = {
val m: HashMap[String,String] = new HashMap[String, String]
m.update("foo","bar")
}
}
//############################################################################
// Bug 167
class Bug167Node(bar:Int) {
val foo = {
val bar = 1;
bar
}
}
object Bug167Test {
def test(args: Array[String]): Unit = {
if (new Bug167Node(0).foo != 1) println("bug 167");
}
}
//############################################################################
// Bug 168
class Bug168Foo {
class Bar
def foo = new Bar
}
object Bug168Test {
def test(args: Array[String]): Unit = {
(new Bug168Foo).foo
()
}
}
//############################################################################
// Bug 174
class Bug174Foo[X] {
class Tree
class Node extends Tree
val inner: Inner = new SubInner
trait Inner {
def test: Bug174Foo[X]#Tree
}
class SubInner extends Inner {
def test = new Node
}
}
object Bug174Test {
def test(args: Array[String]): Unit = {
(new Bug174Foo[Int]).inner.test
()
}
}
//############################################################################
// Bug 176
trait Bug176A {
type T;
def foo(x: T): Int;
def bar: T;
def test = foo(bar);
}
trait Bug176B {
type S <: AnyRef;
type T = S;
def foo(x: S): Int;
def bar: S;
}
class Bug176C extends Bug176A with Bug176B {
class S;
def foo(x: S) = 1;
def bar = new S;
}
object Bug176Test {
def test(args: Array[String]): Unit = {
val x: Bug176A = new Bug176C;
Console.println(x.test);
}
}
//############################################################################
// Bug 199
class Bug199C { object o; }
object Bug199Test {
def test(args: Array[String]) = {
(new Bug199C).o; ()
}
}
//############################################################################
// Bug 213
trait Bug213Foo {
def testAll: Unit;
def testAllRef: String;
}
class Bug213Bar extends Bug213Foo {
def testAll = (().asInstanceOf[Nothing] : Nothing);
def testAllRef = ("".asInstanceOf[Null] : Null);
}
object Bug213Test {
def test(args: Array[String]): Unit = {
val foo: Bug213Foo = new Bug213Bar;
try {
foo.testAll;
} catch {
case e: ClassCastException =>
Console.println("Cannot cast unit to Nothing");
}
try {
foo.testAllRef;
} catch {
case e: ClassCastException =>
Console.println("Cannot cast empty string to Null");
}
()
}
}
//############################################################################
// Bug 217
object Bug217Test {
def foo[t](fun: Function0[t]): t = fun();
def bar(x: Int): Unit = {
foo(() => 0);
()
}
def test(args: Array[String]): Unit = bar(32);
}
//############################################################################
// Bug 222
object Bug222Test {
def test(args:Array[String]): Unit = {
val array: Array[String] = new Array(16);
()
}
}
//############################################################################
// Bug 225
case class Bug225C();
object Bug225Test {
def test(args: Array[String]): Unit = {
val a = new Array[Array[Bug225C]](2);
a(0) = new Array[Bug225C](2);
a(0)(0) = new Bug225C();
}
}
//############################################################################
// Bug 226
object Bug226Test {
def id[a](xs: Array[a]): Array[a] = xs;
def test(args: Array[String]): Unit = {
var xs = new Array[Int](1);
class X { xs };
xs = id(xs);
id(xs);
()
}
}
//############################################################################
// Bug 233
object Bug233Test {
val b: Array[String] = null;
def test(args: Array[String]): Unit =
Console.println(b == null);
}
//############################################################################
// Bug 250
object Bug250Test {
def test(args: Array[String]): Unit = {
if (true) null;
()
}
}
//############################################################################
// Bug 257
object Bug257Test {
def sayhello(): Unit = { Console.println("I should come 1st and 2nd"); };
def sayhi(): Unit = { Console.println("I should come last"); };
def f1(x: Unit): Unit = ();
def f2(x: Unit)(y: Unit): Unit = ();
def f(x: => Unit): Unit => Unit = {
f1(x);
f2(x);
}
def test(args: Array[String]): Unit = {
f(sayhello())(sayhi())
}
}
//############################################################################
// Bug 266
// version - A
abstract class Bug266AFoo {
type T >: Null <: AnyRef;
abstract class I0 { def f(x: T): Unit; f(null); }
}
object Bug266ATest extends Bug266AFoo {
type T = String;
class I1 extends I0 { def f(x: String): Unit = { Console.println("hello") } }
def test(args: Array[String]): Unit = { new I1; () }
}
// version - B
abstract class Bug266BA {
type t
abstract class P {
def f(x: t): Unit
}
}
abstract class Bug266BA1 extends Bug266BA {
def mkP: Bug266BA1.this.P;
val in: t;
}
trait Bug266BB extends Bug266BA {
type t = Int;
class P1 extends Bug266BB.this.P {
def f(x: Int): Unit = { Console.println(x + 1) }
}
def mkP = new P1;
val in = 3;
}
object Bug266BTest {
val a: Bug266BA1 = new Bug266BA1 with Bug266BB;
def test(args: Array[String]): Unit = a.mkP.f(a.in);
}
// main
object Bug266Test {
def test(args: Array[String]): Unit = {
Bug266ATest.test(args);
Bug266BTest.test(args);
}
}
//############################################################################
// Bug 316
class Bug316MyIterator extends Iterator[Int] {
def hasNext = false
def next = 42
}
object Bug316Test {
def test(args: Array[String]): Unit =
(new Bug316MyIterator) filter { x: Int => x == 1 };
}
//############################################################################
// Bug 328
object Bug328Test {
def test0(f: Function1[Int,String]): Unit = {}
def test(args: Array[String]): Unit = test0(args);
}
//############################################################################
// Bug 396
trait Bug396A {
class I {
def run = Console.println("A");
}
}
trait Bug396B extends Bug396A {
class I extends super.I {
override def run = { super.run; Console.println("B"); }
}
}
trait Bug396C extends Bug396A {
trait I extends super.I {
override def run = { super.run; Console.println("C"); }
}
}
object Bug396Test extends Bug396B with Bug396C {
class I2 extends super[Bug396B].I with super[Bug396C].I;
def test(args: Array[String]): Unit = (new I2).run
}
//############################################################################
// Bug 399
object Bug399Test {
def f(x: String): String = {
trait C { def f: String = x; }
class D extends C;
trait F extends C;
class G extends D with F;
(new G).f
}
def test(args: Array[String]): Unit = {
Console.println(f("a"));
}
}
//############################################################################
// Main
object Test {
var errors: Int = 0
def test(bug: Int, test: => Unit): Unit = {
Console.println("<<< bug " + bug)
try {
test;
} catch {
case exception: Throwable =>
Console.print("Exception in thread \"" + Thread.currentThread + "\" " + exception);
Console.println;
errors += 1
}
Console.println(">>> bug " + bug)
Console.println
}
def main(args: Array[String]): Unit = {
test( 98, Bug98Test.test(args));
test(120, Bug120Test.test(args));
test(135, Bug135Test.test(args));
test(142, Bug142Test.test(args));
test(166, Bug166Test.test(args));
test(167, Bug167Test.test(args));
test(168, Bug168Test.test(args));
test(174, Bug174Test.test(args));
test(176, Bug176Test.test(args));
test(199, Bug199Test.test(args));
test(213, Bug213Test.test(args));
test(217, Bug217Test.test(args));
test(222, Bug222Test.test(args));
test(225, Bug225Test.test(args));
test(226, Bug226Test.test(args));
test(233, Bug233Test.test(args));
test(250, Bug250Test.test(args));
test(257, Bug257Test.test(args));
test(266, Bug266Test.test(args));
test(316, Bug316Test.test(args));
test(328, Bug328Test.test(args));
test(396, Bug396Test.test(args));
test(399, Bug399Test.test(args));
if (errors > 0) {
Console.println;
Console.println(s"$errors error" + (if (errors > 1) "s" else ""));
}
}
}
//############################################################################
|
martijnhoekstra/scala
|
test/files/run/bugs.scala
|
Scala
|
apache-2.0
| 11,567 |
package com.ibm.spark.magic
import com.ibm.spark.utils.DynamicReflectionSupport
import scala.language.dynamics
class MagicExecutor(magicLoader: MagicLoader) extends Dynamic {
val executeMethod = classOf[Magic].getDeclaredMethods.head.getName
def applyDynamic(name: String)(args: Any*): Either[CellMagicOutput, LineMagicOutput] = {
val className = magicLoader.magicClassName(name)
val isCellMagic = magicLoader.hasCellMagic(className)
val isLineMagic = magicLoader.hasLineMagic(className)
(isCellMagic, isLineMagic) match {
case (true, false) =>
val result = executeMagic(className, args)
Left(result.asInstanceOf[CellMagicOutput])
case (false, true) =>
executeMagic(className, args)
Right(LineMagicOutput)
case (_, _) =>
Left(CellMagicOutput("text/plain" ->
s"Magic ${className} could not be executed."))
}
}
private def executeMagic(className: String, args: Seq[Any]) = {
val inst = magicLoader.createMagicInstance(className)
val dynamicSupport = new DynamicReflectionSupport(inst.getClass, inst)
dynamicSupport.applyDynamic(executeMethod)(args)
}
}
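// --- Illustrative usage sketch (added; not part of the original file) ---
// Because MagicExecutor extends Dynamic, any magic name can be invoked as if it were
// a method; "html" below is a hypothetical magic name and `magicLoader` an assumed
// MagicLoader instance.
//
//   val executor = new MagicExecutor(magicLoader)
//   executor.html("<b>bold</b>") match {
//     case Left(cellOutput) => // cell magic: rendered output keyed by MIME type
//     case Right(_)         => // line magic: side effects only, no output payload
//   }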
|
yeghishe/spark-kernel
|
kernel-api/src/main/scala/com/ibm/spark/magic/MagicExecutor.scala
|
Scala
|
apache-2.0
| 1,168 |
package nl.soqua.lcpi.repl.lib
import nl.soqua.lcpi.ast.lambda.Expression
import nl.soqua.lcpi.interpreter.show.Show
import nl.soqua.lcpi.interpreter.transformation.Stringify
import nl.soqua.lcpi.interpreter.{InterpreterResult, SingleExpressionInterpreterResult, TraceInterpreterResult}
trait ExpressionRenderer {
import Show._
protected val lb: String = System.lineSeparator()
def mightReplaceLambda(toggle: AsciiModeToggle): String => String = toggle match {
case Disabled => identity
    case Enabled => _ replaceAll("λ", "\\\\\\\\")
}
private def renderInterpreterResult(interpreterResult: InterpreterResult): String =
renderTrace(interpreterResult) compose Stringify(interpreterResult.expression)
private def traceFold(acc: ShowS, tuple: (String, Expression)) =
acc compose tuple._1 compose " => " compose Stringify(tuple._2) compose lb
private def renderTrace(interpreterResult: InterpreterResult): ShowS = interpreterResult match {
case SingleExpressionInterpreterResult(_, _) => empty
case TraceInterpreterResult(_, _, trace) => trace.foldLeft(empty)(traceFold)
}
def renderEvaluationResult(asciiModeToggle: AsciiModeToggle)(interpreterResult: InterpreterResult): String =
mightReplaceLambda(asciiModeToggle)(renderInterpreterResult(interpreterResult))
}
|
kevinvandervlist/lcpi
|
repl/src/main/scala/nl/soqua/lcpi/repl/lib/ExpressionRenderer.scala
|
Scala
|
mit
| 1,316 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.logical
import org.apache.flink.api.scala._
import org.apache.flink.table.plan.optimize.program.FlinkStreamProgram
import org.apache.flink.table.util.TableTestBase
import org.junit.{Before, Test}
/**
* Test for [[CalcRankTransposeRule]].
*/
class CalcRankTransposeRuleTest extends TableTestBase {
private val util = streamTestUtil()
@Before
def setup(): Unit = {
util.buildStreamProgram(FlinkStreamProgram.PHYSICAL)
util.addDataStream[(Int, String, Long)]("MyTable", 'a, 'b, 'c, 'rowtime)
util.addTableSource[(String, Int, String)]("T", 'category, 'shopId, 'price)
}
@Test
def testPruneOrderKeys(): Unit = {
// Push Calc into Rank, project column (a, rowtime), prune column (b, c)
val sql =
"""
|SELECT a FROM (
| SELECT *,
| ROW_NUMBER() OVER (PARTITION BY a ORDER BY rowtime DESC) as rank_num
| FROM MyTable)
|WHERE rank_num = 1
""".stripMargin
util.verifyPlan(sql)
}
@Test
def testPrunePartitionKeys(): Unit = {
// Push Calc into Rank, project column (a, rowtime), prune column (b, c)
val sql =
"""
|SELECT rowtime FROM (
| SELECT *,
| ROW_NUMBER() OVER (PARTITION BY a ORDER BY rowtime DESC) as rank_num
| FROM MyTable)
|WHERE rank_num = 1
""".stripMargin
util.verifyPlan(sql)
}
@Test
def testPruneUniqueKeys(): Unit = {
// Push Calc into Rank, project column (category, shopId, max_price), prune column (min_price)
val sql =
"""
|SELECT category, max_price, rank_num FROM (
| SELECT *,
| ROW_NUMBER() OVER (PARTITION BY category ORDER BY max_price ASC) as rank_num
| FROM (
| SELECT category, shopId, max(price) as max_price, min(price) as min_price
| FROM T
| GROUP BY category, shopId
| ))
|WHERE rank_num <= 3
""".stripMargin
util.verifyPlan(sql)
}
@Test
def testNotTranspose(): Unit = {
// Not transpose calc into Rank because there is no columns to prune
val sql =
"""
|SELECT category, max_price, rank_num FROM (
| SELECT *,
| ROW_NUMBER() OVER (PARTITION BY category ORDER BY max_price ASC) as rank_num
| FROM (
| SELECT category, shopId, max(price) as max_price
| FROM T
| GROUP BY category, shopId
| ))
|WHERE rank_num <= 3
""".stripMargin
util.verifyPlan(sql)
}
@Test
def testPruneRankNumber(): Unit = {
// Push Calc into Rank, project column (a, rowtime), prune column (b, c)
val sql =
"""
|SELECT a, rowtime FROM (
| SELECT *,
| ROW_NUMBER() OVER (PARTITION BY a ORDER BY rowtime DESC) as rank_num
| FROM MyTable)
|WHERE rank_num <= 2
""".stripMargin
util.verifyPlan(sql)
}
@Test
def testProjectRankNumber(): Unit = {
// Push Calc into Rank, project column (a, rowtime), prune column (b, c)
// Need a New Calc on top of Rank to keep equivalency
val sql =
"""
|SELECT rank_num, rowtime, a, rank_num, a, rank_num FROM (
| SELECT *,
| ROW_NUMBER() OVER (PARTITION BY a ORDER BY rowtime DESC) as rank_num
| FROM MyTable)
|WHERE rank_num <= 2
""".stripMargin
util.verifyPlan(sql)
}
@Test
def testTrivialCalcIsRemoved(): Unit = {
// Push Calc into Rank, project column (a, rowtime), prune column (b, c)
// Does not need a New Calc on top of Rank because it is trivial
val sql =
"""
|SELECT a, rowtime, rank_num FROM (
| SELECT *,
| ROW_NUMBER() OVER (PARTITION BY a ORDER BY rowtime DESC) as rank_num
| FROM MyTable)
|WHERE rank_num <= 2
""".stripMargin
util.verifyPlan(sql)
}
@Test
def testPushCalcWithConditionIntoRank(): Unit = {
// Push Calc into Rank even if it has filter condition, project column(rowtime, c, a), prune(b)
val sql =
"""
|SELECT rowtime, c FROM (
| SELECT *,
| ROW_NUMBER() OVER (PARTITION BY a ORDER BY rowtime DESC) as rank_num
| FROM MyTable)
|WHERE rank_num <= 2 AND a > 10
""".stripMargin
util.verifyPlan(sql)
}
@Test
def testPruneUnusedProject(): Unit = {
// Push Calc into Rank, project(category, shopId, max_price), prune (min_price)
val sql =
"""
|SELECT category, shopId, max_price, rank_num
|FROM (
| SELECT category, shopId, max_price,
| ROW_NUMBER() OVER (PARTITION BY category ORDER BY max_price ASC) as rank_num
| FROM (
| SELECT category, shopId, max(price) as max_price, min(price) as min_price
| FROM T
| GROUP BY category, shopId
| ))
|WHERE rank_num <= 3
""".stripMargin
util.verifyPlan(sql)
}
}
|
shaoxuan-wang/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/plan/rules/logical/CalcRankTransposeRuleTest.scala
|
Scala
|
apache-2.0
| 5,764 |
package com.mayreh.thankyou
import org.scalatest.FlatSpec
class GitHubRepoTest extends FlatSpec {
it should "parse https github web url" in {
assert(GitHubRepo.fromUrl("https://github.com/opt-tech/chronoscala") == Some(GitHubRepo("opt-tech", "chronoscala")))
}
it should "parse https github repo url" in {
assert(GitHubRepo.fromUrl("https://github.com/opt-tech/chronoscala.git") == Some(GitHubRepo("opt-tech", "chronoscala")))
}
it should "parse ssh github repo url" in {
assert(GitHubRepo.fromUrl("[email protected]:opt-tech/chronoscala.git") == Some(GitHubRepo("opt-tech", "chronoscala")))
}
it should "return None if non-github web url" in {
assert(GitHubRepo.fromUrl("http://www.example.com").isEmpty)
}
it should "return None if non-github url" in {
assert(GitHubRepo.fromUrl("git://git.apache.org/kafka.git").isEmpty)
}
}
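// --- Illustrative sketch (added; not part of the original test file) ---
// The GitHubRepo companion under test is defined elsewhere in the project. A minimal
// implementation satisfying the cases above could look like this (hypothetical; the
// real code may differ):
//
//   case class GitHubRepo(owner: String, name: String)
//
//   object GitHubRepo {
//     private val Https = """https://github\.com/([^/]+?)/([^/]+?)(?:\.git)?/?""".r
//     private val Ssh   = """git@github\.com:([^/]+?)/([^/]+?)(?:\.git)?""".r
//
//     def fromUrl(url: String): Option[GitHubRepo] = url match {
//       case Https(owner, name) => Some(GitHubRepo(owner, name))
//       case Ssh(owner, name)   => Some(GitHubRepo(owner, name))
//       case _                  => None
//     }
//   }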
|
ocadaruma/sbt-thank-you-stars
|
src/test/scala/com/mayreh/thankyou/GitHubRepoTest.scala
|
Scala
|
apache-2.0
| 872 |
package chess
package format
import Pos._
class UciMoveTest extends ChessTest {
import pgn.Fixtures._
"piotr encoding" should {
"be reflexive" in {
val move = UciMove("a2g7").get
UciMove piotr move.piotr must_== move.some
}
}
}
|
psuter/scalachess
|
src/test/scala/format/UciMoveTest.scala
|
Scala
|
mit
| 258 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.serializer
import java.io.{BufferedInputStream, BufferedOutputStream, InputStream, OutputStream}
import java.nio.ByteBuffer
import scala.reflect.ClassTag
import org.apache.spark.SparkConf
import org.apache.spark.io.CompressionCodec
import org.apache.spark.security.CryptoStreamUtils
import org.apache.spark.storage._
import org.apache.spark.util.io.{ChunkedByteBuffer, ChunkedByteBufferOutputStream}
/**
* Component which configures serialization, compression and encryption for various Spark
* components, including automatic selection of which [[Serializer]] to use for shuffles.
*/
private[spark] class SerializerManager(
defaultSerializer: Serializer,
conf: SparkConf,
encryptionKey: Option[Array[Byte]]) {
def this(defaultSerializer: Serializer, conf: SparkConf) = this(defaultSerializer, conf, None)
private[this] val kryoSerializer = new KryoSerializer(conf)
private[this] val stringClassTag: ClassTag[String] = implicitly[ClassTag[String]]
private[this] val primitiveAndPrimitiveArrayClassTags: Set[ClassTag[_]] = {
val primitiveClassTags = Set[ClassTag[_]](
ClassTag.Boolean,
ClassTag.Byte,
ClassTag.Char,
ClassTag.Double,
ClassTag.Float,
ClassTag.Int,
ClassTag.Long,
ClassTag.Null,
ClassTag.Short
)
val arrayClassTags = primitiveClassTags.map(_.wrap)
primitiveClassTags ++ arrayClassTags
}
// Whether to compress broadcast variables that are stored
private[this] val compressBroadcast = conf.getBoolean("spark.broadcast.compress", true)
  // Whether to compress shuffle output that is stored
private[this] val compressShuffle = conf.getBoolean("spark.shuffle.compress", true)
// Whether to compress RDD partitions that are stored serialized
private[this] val compressRdds = conf.getBoolean("spark.rdd.compress", false)
// Whether to compress shuffle output temporarily spilled to disk
private[this] val compressShuffleSpill = conf.getBoolean("spark.shuffle.spill.compress", true)
/* The compression codec to use. Note that the "lazy" val is necessary because we want to delay
* the initialization of the compression codec until it is first used. The reason is that a Spark
* program could be using a user-defined codec in a third party jar, which is loaded in
   * Executor.updateDependencies. When the BlockManager is initialized, user-level jars haven't been
* loaded yet. */
private lazy val compressionCodec: CompressionCodec = CompressionCodec.createCodec(conf)
def encryptionEnabled: Boolean = encryptionKey.isDefined
def canUseKryo(ct: ClassTag[_]): Boolean = {
primitiveAndPrimitiveArrayClassTags.contains(ct) || ct == stringClassTag
}
  // SPARK-18617: the feature in SPARK-13990 cannot be applied to Spark Streaming yet; at worst,
  // a streaming job based on `Receiver` mode cannot run properly on Spark 2.x. Disabling the
  // `kryo auto pick` feature for streaming is therefore a reasonable first step.
def getSerializer(ct: ClassTag[_], autoPick: Boolean): Serializer = {
if (autoPick && canUseKryo(ct)) {
kryoSerializer
} else {
defaultSerializer
}
}
/**
* Pick the best serializer for shuffling an RDD of key-value pairs.
*/
def getSerializer(keyClassTag: ClassTag[_], valueClassTag: ClassTag[_]): Serializer = {
if (canUseKryo(keyClassTag) && canUseKryo(valueClassTag)) {
kryoSerializer
} else {
defaultSerializer
}
}
private def shouldCompress(blockId: BlockId): Boolean = {
blockId match {
case _: ShuffleBlockId => compressShuffle
case _: BroadcastBlockId => compressBroadcast
case _: RDDBlockId => compressRdds
case _: TempLocalBlockId => compressShuffleSpill
case _: TempShuffleBlockId => compressShuffle
case _ => false
}
}
/**
* Wrap an input stream for encryption and compression
*/
def wrapStream(blockId: BlockId, s: InputStream): InputStream = {
wrapForCompression(blockId, wrapForEncryption(s))
}
/**
* Wrap an output stream for encryption and compression
*/
def wrapStream(blockId: BlockId, s: OutputStream): OutputStream = {
wrapForCompression(blockId, wrapForEncryption(s))
}
/**
* Wrap an input stream for encryption if shuffle encryption is enabled
*/
def wrapForEncryption(s: InputStream): InputStream = {
encryptionKey
.map { key => CryptoStreamUtils.createCryptoInputStream(s, conf, key) }
.getOrElse(s)
}
/**
* Wrap an output stream for encryption if shuffle encryption is enabled
*/
def wrapForEncryption(s: OutputStream): OutputStream = {
encryptionKey
.map { key => CryptoStreamUtils.createCryptoOutputStream(s, conf, key) }
.getOrElse(s)
}
/**
* Wrap an output stream for compression if block compression is enabled for its block type
*/
def wrapForCompression(blockId: BlockId, s: OutputStream): OutputStream = {
if (shouldCompress(blockId)) compressionCodec.compressedOutputStream(s) else s
}
/**
* Wrap an input stream for compression if block compression is enabled for its block type
*/
def wrapForCompression(blockId: BlockId, s: InputStream): InputStream = {
if (shouldCompress(blockId)) compressionCodec.compressedInputStream(s) else s
}
/** Serializes into a stream. */
def dataSerializeStream[T: ClassTag](
blockId: BlockId,
outputStream: OutputStream,
values: Iterator[T]): Unit = {
val byteStream = new BufferedOutputStream(outputStream)
val autoPick = !blockId.isInstanceOf[StreamBlockId]
val ser = getSerializer(implicitly[ClassTag[T]], autoPick).newInstance()
ser.serializeStream(wrapForCompression(blockId, byteStream)).writeAll(values).close()
}
/** Serializes into a chunked byte buffer. */
def dataSerialize[T: ClassTag](
blockId: BlockId,
values: Iterator[T]): ChunkedByteBuffer = {
dataSerializeWithExplicitClassTag(blockId, values, implicitly[ClassTag[T]])
}
/** Serializes into a chunked byte buffer. */
def dataSerializeWithExplicitClassTag(
blockId: BlockId,
values: Iterator[_],
classTag: ClassTag[_]): ChunkedByteBuffer = {
val bbos = new ChunkedByteBufferOutputStream(1024 * 1024 * 4, ByteBuffer.allocate)
val byteStream = new BufferedOutputStream(bbos)
val autoPick = !blockId.isInstanceOf[StreamBlockId]
val ser = getSerializer(classTag, autoPick).newInstance()
ser.serializeStream(wrapForCompression(blockId, byteStream)).writeAll(values).close()
bbos.toChunkedByteBuffer
}
/**
* Deserializes an InputStream into an iterator of values and disposes of it when the end of
* the iterator is reached.
*/
def dataDeserializeStream[T](
blockId: BlockId,
inputStream: InputStream)
(classTag: ClassTag[T]): Iterator[T] = {
val stream = new BufferedInputStream(inputStream)
val autoPick = !blockId.isInstanceOf[StreamBlockId]
getSerializer(classTag, autoPick)
.newInstance()
.deserializeStream(wrapForCompression(blockId, inputStream))
.asIterator.asInstanceOf[Iterator[T]]
}
}
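// --- Illustrative sketch (added; not part of the original source) ---
// The compress* flags read above are ordinary SparkConf entries, so block-type
// compression can be tuned when the conf is built. A hedged example (JavaSerializer
// is used here only as a stand-in default serializer):
//
//   val conf = new SparkConf()
//     .set("spark.rdd.compress", "true")      // compress serialized RDD partitions
//     .set("spark.shuffle.compress", "false") // leave shuffle output uncompressed
//   val serializerManager = new SerializerManager(new JavaSerializer(conf), conf)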
|
SHASHANKB/spark
|
core/src/main/scala/org/apache/spark/serializer/SerializerManager.scala
|
Scala
|
apache-2.0
| 7,954 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package bootstrap.liftweb
/**
* An application context with some configuration
* variables
*/
object RudderContext {
//Is the root server correctly defined ?
var rootNodeNotDefined = true
}
|
Kegeruneku/rudder
|
rudder-web/src/main/scala/bootstrap/liftweb/RudderContext.scala
|
Scala
|
agpl-3.0
| 1,871 |
package com.twitter.querulous.config
import com.twitter.querulous._
import com.twitter.querulous.database.DatabaseFactory
import com.twitter.querulous.query.QueryFactory
import com.twitter.util.Duration
import evaluator._
trait AutoDisablingQueryEvaluator {
def errorCount: Int
def interval: Duration
}
class QueryEvaluator {
var database: Database = new Database
var query: Query = new Query
var singletonFactory = false
var autoDisable: Option[AutoDisablingQueryEvaluator] = None
def autoDisable_=(a: AutoDisablingQueryEvaluator) { autoDisable = Some(a) }
private var memoizedFactory: Option[QueryEvaluatorFactory] = None
def apply(stats: StatsCollector): QueryEvaluatorFactory = apply(stats, None, None)
def apply(stats: StatsCollector, dbStatsFactory: DatabaseFactory => DatabaseFactory, queryStatsFactory: QueryFactory => QueryFactory): QueryEvaluatorFactory = apply(stats, Some(dbStatsFactory), Some(queryStatsFactory))
def apply(stats: StatsCollector, dbStatsFactory: Option[DatabaseFactory => DatabaseFactory], queryStatsFactory: Option[QueryFactory => QueryFactory]): QueryEvaluatorFactory = {
synchronized {
if (!singletonFactory) memoizedFactory = None
memoizedFactory = memoizedFactory orElse {
var factory: QueryEvaluatorFactory = new StandardQueryEvaluatorFactory(database(stats, dbStatsFactory), query(stats, queryStatsFactory))
autoDisable.foreach { disable =>
factory = new AutoDisablingQueryEvaluatorFactory(
factory, disable.errorCount, disable.interval
)
}
Some(factory)
}
memoizedFactory.get
}
}
def apply(): QueryEvaluatorFactory = apply(NullStatsCollector)
}
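// --- Illustrative sketch (added; not part of the original source) ---
// This config class is meant to be instantiated, mutated, and then asked for a
// (possibly memoized) QueryEvaluatorFactory. A hedged example with auto-disabling
// (assumes Duration.fromSeconds from com.twitter.util is available in this version):
//
//   val config = new QueryEvaluator
//   config.singletonFactory = true
//   config.autoDisable = new AutoDisablingQueryEvaluator {
//     val errorCount = 3
//     val interval   = Duration.fromSeconds(60)
//   }
//   val factory: QueryEvaluatorFactory = config()   // same as config.apply()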
|
twitter/querulous
|
querulous-core/src/main/scala/com/twitter/querulous/config/QueryEvaluator.scala
|
Scala
|
apache-2.0
| 1,716 |
/*
* Copyright (C) 2011 Mathias Doenitz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cc.spray.can
import org.specs2._
import HttpProtocols._
import matcher.DataTables
import java.nio.ByteBuffer
class ResponsePreparerSpec extends Specification with ResponsePreparer with DataTables { def is =
"The response preparation logic should properly render" ^
"a response with status 200, no headers and no body" ! e1^
"a response with status 304, a few headers and no body" ! e2^
"a response with status 400, a few headers and a body" ! e3^
"a non-keepalive HTTP/1.0 message" ! e4^
"a chunked response without body" ! e5^
"a chunked response with body" ! e6^
"a response chunk" ! e7^
"a terminating response chunk" ! e8^
end^
"The 'Connection' header should be rendered correctly" ! e9
def e1 = prep() {
HttpResponse(200, Nil)
} mustEqual prep {
"""|HTTP/1.1 200 OK
|Server: spray-can/1.0.0
|Date: Thu, 25 Aug 2011 09:10:29 GMT
|Content-Length: 0
|
|""" -> false
}
def e2 = prep() {
HttpResponse(304, List(
HttpHeader("X-Fancy", "of course"),
HttpHeader("Age", "0")
))
} mustEqual prep {
"""|HTTP/1.1 304 Not Modified
|X-Fancy: of course
|Age: 0
|Server: spray-can/1.0.0
|Date: Thu, 25 Aug 2011 09:10:29 GMT
|Content-Length: 0
|
|""" -> false
}
def e3 = prep() {
HttpResponse(
status = 400,
headers = List(HttpHeader("Age", "30"), HttpHeader("Connection", "Keep-Alive")),
body = "Small f*ck up overhere!".getBytes("ASCII"),
protocol = `HTTP/1.0`
)
} mustEqual prep {
"""|HTTP/1.0 400 Bad Request
|Age: 30
|Connection: Keep-Alive
|Server: spray-can/1.0.0
|Date: Thu, 25 Aug 2011 09:10:29 GMT
|Content-Length: 23
|
|Small f*ck up overhere!""" -> false
}
def e4 = prep() {
HttpResponse(
status = 200,
headers = List(HttpHeader("Age", "30"), HttpHeader("Cache-Control", "public")),
body = "Small f*ck up overhere!".getBytes("ASCII"),
protocol = `HTTP/1.0`
)
} mustEqual prep {
"""|HTTP/1.0 200 OK
|Age: 30
|Cache-Control: public
|Server: spray-can/1.0.0
|Date: Thu, 25 Aug 2011 09:10:29 GMT
|
|Small f*ck up overhere!""" -> true
}
def e5 = prep(reqConnectionHeader = Some("close"), chunked = true) {
HttpResponse(200, List(HttpHeader("Age", "30")))
} mustEqual prep {
"""|HTTP/1.1 200 OK
|Age: 30
|Connection: close
|Server: spray-can/1.0.0
|Date: Thu, 25 Aug 2011 09:10:29 GMT
|Transfer-Encoding: chunked
|
|""" -> true
}
def e6 = prep(chunked = true) {
HttpResponse().withBody("Yahoooo")
} mustEqual prep {
"""|HTTP/1.1 200 OK
|Server: spray-can/1.0.0
|Date: Thu, 25 Aug 2011 09:10:29 GMT
|Transfer-Encoding: chunked
|
|7
|Yahoooo
|""" -> false
}
def e7 = decode(
prepareChunk(
List(ChunkExtension("key", "value"), ChunkExtension("another", "tl;dr")),
"body123".getBytes("ISO-8859-1")
)
) mustEqual prep {
"""|7;key=value;another="tl;dr"
|body123
|"""
}
def e8 = decode(
prepareFinalChunk(Nil, List(HttpHeader("Age", "30"), HttpHeader("Cache-Control", "public")))
) mustEqual prep {
"""|0
|Age: 30
|Cache-Control: public
|
|"""
}
val NONE: Option[String] = None
def e9 =
"Client Version" | "Request" | "Response" | "Rendered" | "Close" |
`HTTP/1.1` ! NONE ! NONE ! NONE ! false |
`HTTP/1.1` ! Some("close") ! NONE ! Some("close") ! true |
`HTTP/1.1` ! Some("Keep-Alive") ! NONE ! NONE ! false |
`HTTP/1.0` ! NONE ! NONE ! NONE ! true |
`HTTP/1.0` ! Some("close") ! NONE ! NONE ! true |
`HTTP/1.0` ! Some("Keep-Alive") ! NONE ! Some("Keep-Alive") ! false |
`HTTP/1.1` ! NONE ! Some("close") ! Some("close") ! true |
`HTTP/1.0` ! Some("close") ! Some("Keep-Alive") ! Some("Keep-Alive") ! false |> {
(reqProto, reqCH, resCH, renCH, close) =>
prep(reqProto, reqCH) {
HttpResponse(200, resCH.map(h => List(HttpHeader("Connection", h))).getOrElse(Nil))
} mustEqual prep {
"HTTP/1.1 200 OK\n" +
renCH.map("Connection: " + _ + "\n").getOrElse("") +
"Server: spray-can/1.0.0\n" +
"Date: Thu, 25 Aug 2011 09:10:29 GMT\n" +
"Content-Length: 0\n\n" -> close
}
}
def prep(reqProtocol: HttpProtocol = `HTTP/1.1`, reqConnectionHeader: Option[String] = None, chunked: Boolean = false)
(response: HttpResponse) = {
val sb = new java.lang.StringBuilder()
val (buffers, closeAfterWrite) = {
if (chunked) prepareChunkedResponseStart(RequestLine(protocol = reqProtocol), response, reqConnectionHeader)
else prepareResponse(RequestLine(protocol = reqProtocol), response, reqConnectionHeader)
}
sb.append(decode(buffers))
sb.toString -> closeAfterWrite
}
def decode(buffers: List[ByteBuffer]) = {
val sb = new java.lang.StringBuilder()
buffers.foreach { buf =>
sb.append(new String(buf.array, "ASCII"))
}
sb.toString
}
def prep(t: (String, Boolean)): (String, Boolean) = t._1.stripMargin.replace("\n", "\r\n") -> t._2
def prep(s: String): String = s.stripMargin.replace("\n", "\r\n")
override val dateTimeNow = DateTime(2011, 8, 25, 9,10,29) // provide a stable date for testing
protected def serverHeader = "spray-can/1.0.0"
}
|
spray/spray-can
|
spray-can/src/test/scala/cc/spray/can/ResponsePreparerSpec.scala
|
Scala
|
apache-2.0
| 6,588 |
package org.opencommercesearch.api.controllers
/*
* Licensed to OpenCommerceSearch under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. OpenCommerceSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import play.api.mvc._
import org.opencommercesearch.api.Global._
object ApiDocController extends Controller {
def index = Action {
Ok(views.html.index(getConfig("swagger.api.basepath", "http://localhost:9000")))
}
}
|
madickson/opencommercesearch
|
opencommercesearch-api/app/org/opencommercesearch/api/controllers/ApiDocController.scala
|
Scala
|
apache-2.0
| 1,053 |
import scala.quoted.*
trait IsExpr[T] {
type Underlying
def toExpr(x: T): Expr[Underlying]
}
given [U]: IsExpr[Expr[U]] = new IsExpr[Expr[U]] {
type Underlying = U
def toExpr(x: Expr[U]): Expr[U] = x
}
def f(x: Any): String = x.toString
def g[T](x: T)(using e: IsExpr[T])(using tu: Type[e.Underlying]): Quotes ?=> Expr[String] = {
val underlying: Expr[e.Underlying] = e.toExpr(x)
'{f($underlying)}
}
inline def mcr(): Any = ${mcrImpl}
def mcrImpl(using Quotes): Expr[Any] = {
val x = '{1}
g(x)
}
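// --- Illustrative sketch (added; not part of the original file) ---
// From a separate file compiled against this one (file name hypothetical), the inline
// macro is simply invoked; `g` stitches the given Expr into a call to `f`, so `mcr()`
// expands to (roughly) `f(1)` and prints "1":
//
//   // Use_2.scala (hypothetical)
//   @main def demo(): Unit = println(mcr())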
|
dotty-staging/dotty
|
tests/run-macros/i7048/Lib_1.scala
|
Scala
|
apache-2.0
| 517 |
package microjson
import scala.collection.mutable.ArrayBuffer
sealed trait JsValue {
def value: Any
}
case class JsString(value: java.lang.String) extends JsValue
case class JsObject(value: Map[String, JsValue]) extends JsValue
case class JsArray(value: Seq[JsValue]) extends JsValue
case class JsNumber(value: java.lang.String) extends JsValue
trait JsBoolean extends JsValue {
def value: Boolean
}
case object JsFalse extends JsBoolean {
val value = false
}
case object JsTrue extends JsBoolean {
val value = true
}
case object JsNull extends JsValue {
def value = null
}
object Json {
// *** Character Kinds
type CharKind = Int
val Letter = 0
val Digit = 1
val Minus = 2
val Quote = 3
val Colon = 4
val Comma = 5
val Lbra = 6
val Rbra = 7
val Larr = 8
val Rarr = 9
val Blank = 10
val Other = 11
val Eof = 12
val Slash = 13
// *** Token Kinds
type TokenKind = Int
val ID = 0
val STRING = 1
val NUMBER = 2
val BIGNUMBER = 3
val FLOATNUMBER = 4
val COLON = 5
val COMMA = 6
val LOBJ = 7
val ROBJ = 8
val LARR = 9
val RARR = 10
val BLANK = 11
val EOF = 12
// *** Character => CharKind Map ***
val charKind = (0 to 255).toArray.map {
case c if 'a'.toInt <= c && c <= 'z'.toInt => Letter
case c if 'A'.toInt <= c && c <= 'Z'.toInt => Letter
case c if '0'.toInt <= c && c <= '9'.toInt => Digit
case '-' => Minus
case ',' => Comma
case '"' => Quote
case ':' => Colon
case '{' => Lbra
case '}' => Rbra
case '[' => Larr
case ']' => Rarr
case ' ' => Blank
    case '\t' => Blank
    case '\n' => Blank
    case '\r' => Blank
case '/' => Slash
case _ => Other
}
// *** Character Escapes
val escapeMap = Map[Int, String](
'\\\\'.toInt -> "\\\\",
'/'.toInt -> "/",
'\\"'.toInt -> "\\"",
'b'.toInt -> "\\b",
'f'.toInt -> "\\f",
'n'.toInt -> "\\n",
'r'.toInt -> "\\r",
't'.toInt -> "\\t"
)
def writeToBuffer(v: JsValue, sb: StringBuffer): Unit = v match {
case JsString(s) =>
sb.append('"')
var i = 0
while(i < s.length){
s.charAt(i) match {
          case '\\' => sb.append("\\\\")
          case '"' => sb.append("\\\"")
          case '/' => sb.append("\\/")
          case '\b' => sb.append("\\b")
          case '\t' => sb.append("\\t")
          case '\n' => sb.append("\\n")
          case '\f' => sb.append("\\f")
          case '\r' => sb.append("\\r")
case c =>
if (c < ' '){
val t = "000" + Integer.toHexString(c)
sb.append("\\\\u" + t.takeRight(4))
}else{
sb.append(c.toString)
}
}
i += 1
}
sb.append('"')
case JsObject(kvs) =>
sb.append("{")
var first = true
kvs.foreach(kv => {
val (k, v) = kv
if (first)
first = false
else
sb.append(", ")
writeToBuffer(JsString(k), sb)
sb.append(": ")
writeToBuffer(v, sb)
})
sb.append("}")
case JsArray(vs) =>
sb.append("[")
if (vs.length > 0) writeToBuffer(vs(0), sb)
var i = 1
while(i < vs.length){
sb.append(", ")
writeToBuffer(vs(i), sb)
i += 1
}
sb.append("]")
case JsNumber(d) => sb.append(d)
case JsFalse => sb.append("false")
case JsTrue => sb.append("true")
case JsNull => sb.append("null")
}
def write(v: JsValue): String = {
val sb = new StringBuffer()
Json.writeToBuffer(v, sb)
sb.toString
}
/**
* Self-contained JSON parser adapted from
*
* https://github.com/nestorpersist/json
*/
def read(s: String): JsValue = {
// *** Import Shared Data ***
// *** INPUT STRING ***
// array faster than accessing string directly using charAt
//final val s1 = s.toCharArray()
val size = s.size
// *** CHARACTERS ***
var pos = 0
var ch: Int = 0
var chKind: CharKind = 0
var chLinePos: Int = 0
var chCharPos: Int = 0
def chNext() = {
if (pos < size) {
//ch = s1(pos).toInt
ch = s.charAt(pos)
chKind = if (ch < 255) {
charKind(ch)
} else {
Other
}
pos += 1
        if (ch == '\n'.toInt) {
chLinePos += 1
chCharPos = 1
} else {
chCharPos += 1
}
} else {
ch = -1
pos = size + 1
chKind = Eof
}
}
def chError(msg: String): Nothing = {
throw new Json.Exception(msg, s, chLinePos, chCharPos)
}
def chMark = pos - 1
def chSubstr(first: Int, delta: Int = 0) = {
s.substring(first, pos - 1 - delta)
}
// *** LEXER ***
var tokenKind = BLANK
var tokenValue = ""
var linePos = 1
var charPos = 1
def getDigits() = {
while (chKind == Digit) chNext()
}
def handleDigit() {
val first = chMark
getDigits()
val k1 = if (ch == '.'.toInt) {
chNext()
getDigits()
BIGNUMBER
} else {
NUMBER
}
val k2 = if (ch == 'E'.toInt || ch == 'e'.toInt) {
chNext()
if (ch == '+'.toInt) {
chNext()
} else if (ch == '-'.toInt) {
chNext()
}
getDigits()
FLOATNUMBER
} else {
k1
}
tokenKind = k2
tokenValue = chSubstr(first)
}
def handleRaw() {
chNext()
val first = chMark
var state = 0
do {
if (chKind == Eof) chError("EOF encountered in raw string")
state = (ch, state) match {
case ('}', _) => 1
case ('"', 1) => 2
case ('"', 2) => 3
case ('"', 3) => 0
case _ => 0
}
chNext()
} while (state != 3)
tokenKind = STRING
tokenValue = chSubstr(first, 3)
}
def handle(i: Int) = {
chNext()
tokenKind = i
tokenValue = ""
}
def tokenNext() {
do {
linePos = chLinePos
charPos = chCharPos
val kind: Int = chKind
kind match {
case Letter =>
val first = chMark
while (chKind == Letter || chKind == Digit) {
chNext()
}
tokenKind = ID
tokenValue = chSubstr(first)
case Digit => handleDigit()
case Minus =>
chNext()
handleDigit()
tokenValue = "-" + tokenValue
case Quote =>
val sb = new StringBuilder(50)
chNext()
var first = chMark
while (ch != '"'.toInt && ch >= 32) {
            if (ch == '\\'.toInt) {
sb.append(chSubstr(first))
chNext()
escapeMap.get(ch) match {
case Some(s) =>
sb.append(s)
chNext()
case None =>
if (ch != 'u'.toInt) chError("Illegal escape")
chNext()
var code = 0
for (i <- 1 to 4) {
val ch1 = ch.toChar.toString
val i = "0123456789abcdef".indexOf(ch1.toLowerCase)
if (i == -1) chError("Illegal hex character")
code = code * 16 + i
chNext()
}
sb.append(code.toChar.toString)
}
first = chMark
} else {
chNext()
}
}
if (ch != '"') chError("Unexpected string character: " + ch.toChar)
sb.append(chSubstr(first))
tokenKind = STRING
tokenValue = sb.toString()
chNext()
if (tokenValue.length() == 0 && ch == '{') {
handleRaw()
}
case Colon => handle(COLON)
case Comma => handle(COMMA)
case Lbra => handle(LOBJ)
case Rbra => handle(ROBJ)
case Larr => handle(LARR)
case Rarr => handle(RARR)
case Blank =>
do chNext() while (chKind == Blank)
tokenKind = BLANK
tokenValue = ""
case Other => chError("Unexpected character: " + ch.toChar + " " + ch)
case Eof =>
chNext()
tokenKind = EOF
tokenValue = ""
case Slash =>
if (chKind != Slash) chError("Expecting Slash")
          do chNext() while (ch != '\n' && chKind != Eof)
tokenKind = BLANK
tokenValue = ""
}
} while (tokenKind == BLANK)
}
def tokenError(msg: String): Nothing = {
throw new Json.Exception(msg, s, linePos, charPos)
}
// *** PARSER ***
def handleEof() = tokenError("Unexpected eof")
def handleUnexpected(i: String) = tokenError(s"Unexpected input: [$i]")
def handleArray(): JsArray = {
tokenNext()
val result = ArrayBuffer.empty[JsValue]
while (tokenKind != RARR) {
result += getJson()
tokenKind match{
case COMMA => tokenNext()
case RARR => // do nothing
case _ => tokenError("Expecting , or ]")
}
}
tokenNext()
JsArray(result.toIndexedSeq)
}
def handleObject(): JsObject = {
tokenNext()
val result = ArrayBuffer.empty[(String, JsValue)]
while (tokenKind != ROBJ) {
if (tokenKind != STRING && tokenKind != ID) tokenError("Expecting string or name")
val name = tokenValue
tokenNext()
if (tokenKind != COLON) tokenError("Expecting :")
tokenNext()
result += (name -> getJson())
tokenKind match{
case COMMA => tokenNext()
case ROBJ => // do nothing
case _ => tokenError("Expecting , or }")
}
}
tokenNext()
JsObject(result.toMap)
}
def handleNumber(name: String, f: String => Unit) = {
try {
f(tokenValue)
} catch {
case _: Throwable => tokenError("Bad " + name)
}
val old = tokenValue
tokenNext()
JsNumber(old)
}
def getJson(): JsValue = {
val kind: Int = tokenKind
val result: JsValue = kind match {
case ID =>
val result: JsValue = tokenValue match {
case "true" => JsTrue
case "false" => JsFalse
case "null" => JsNull
case _ => tokenError("Not true, false, or null")
}
tokenNext()
result
case STRING =>
val result = tokenValue
tokenNext()
JsString(result)
case NUMBER => handleNumber("NUMBER", _.toLong)
case BIGNUMBER => handleNumber("BIGNUMBER", _.toDouble)
case FLOATNUMBER => handleNumber("FLOATNUMBER", _.toDouble)
case COLON => handleUnexpected(":")
case COMMA => handleUnexpected(",")
case LOBJ => handleObject()
case ROBJ => handleUnexpected("}")
case LARR => handleArray()
case RARR => handleUnexpected("]")
case EOF => handleEof()
}
result
}
def parse(): JsValue = {
chNext()
tokenNext()
val result = getJson
if (tokenKind != EOF) tokenError("Excess input")
result
}
parse()
}
class Exception(val msg: String,
val input: String,
val line: Int,
val char: Int)
extends scala.Exception(s"JsonParse Error: $msg line $line [$char] in $input")
}
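// --- Illustrative round-trip sketch (added; not part of the original source) ---
// Build a JsValue tree, serialize it with Json.write, and parse it back with Json.read.
object JsonRoundTripExample {
  def main(args: Array[String]): Unit = {
    val doc = JsObject(Map(
      "name"  -> JsString("microjson"),
      "stars" -> JsNumber("42"),
      "tags"  -> JsArray(Seq(JsString("json"), JsString("scala"))),
      "fork"  -> JsFalse
    ))
    val text   = Json.write(doc)   // e.g. {"name": "microjson", "stars": 42, ...} (key order may vary)
    val parsed = Json.read(text)   // parsed back into an equal JsValue tree
    assert(parsed == doc)
  }
}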
|
benhutchison/MicroJson
|
shared/src/main/scala/microjson/Js.scala
|
Scala
|
apache-2.0
| 11,583 |
package org.jetbrains.plugins.scala
package lang
package refactoring
package namesSuggester
package genericTypes
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.psi.types.api.ParameterizedType
/**
* @author adkozlov
*/
class MonadicTypeNamesProvider extends GenericTypeNamesProvider {
import MonadicTypeNamesProvider._
override protected def names(designator: ScType, arguments: Seq[ScType]): Seq[String] =
NameSuggester.compoundNames(findPrefix(designator).toSeq, argumentNames(arguments.head))
override def isValid(`type`: ScType): Boolean =
`type` match {
case ParameterizedType(designator, Seq(_)) => findPrefix(designator).isDefined
case _ => false
}
}
object MonadicTypeNamesProvider {
private def findPrefix(designator: ScType): Option[String] =
needPrefix.get(designator.canonicalText)
private[this] val needPrefix = Map(
"_root_.scala.Option" -> "maybe",
"_root_.scala.Some" -> "some",
"_root_.scala.concurrent.Future" -> "eventual",
"_root_.scala.concurrent.Promise" -> "promised",
"_root_.scala.util.Try" -> "tried"
)
}
|
ilinum/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/refactoring/namesSuggester/genericTypes/MonadicTypeNamesProvider.scala
|
Scala
|
apache-2.0
| 1,152 |
/*
* Copyright (C) 2013 FURYU CORPORATION
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
import sbt._
import Keys._
object ApplicationBuild extends Build {
val appOrganization = "jp.furyu"
val appName = "play-c3p0-plugin"
val appVersion = "0.3"
val appScalaVersion = "2.11.1"
val appScalaCrossVersions = Seq(appScalaVersion, "2.10.4")
val main = Project(appName, base = file(".")).settings(
organization := appOrganization,
version := appVersion,
scalaVersion := appScalaVersion,
crossScalaVersions := appScalaCrossVersions,
resolvers ++= Seq(
Resolver.typesafeRepo("releases")
),
libraryDependencies ++= Seq(
"com.typesafe.play" %% "play-jdbc" % "2.3.9",
"com.mchange" % "c3p0" % "0.9.5"
),
publishMavenStyle := true,
publishTo <<= version { (v: String) =>
val nexus = "https://oss.sonatype.org/"
if (v.trim.endsWith("SNAPSHOT")) {
Some("snapshots" at nexus + "content/repositories/snapshots")
} else {
Some("releases" at nexus + "service/local/staging/deploy/maven2")
}
},
publishArtifact in Test := false,
pomIncludeRepository := { _ => false },
pomExtra := (
<url>https://github.com/Furyu/play-c3p0-plugin</url>
<licenses>
<license>
<name>Apache License, Version 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
        <url>git@github.com:Furyu/play-c3p0-plugin.git</url>
        <connection>scm:git:git@github.com:Furyu/play-c3p0-plugin.git</connection>
</scm>
<developers>
<developer>
<id>flysheep1980</id>
<name>flysheep1980</name>
<url>https://github.com/flysheep1980</url>
</developer>
</developers>
)
).settings(appScalariformSettings)
private lazy val appScalariformSettings = {
import com.typesafe.sbt.SbtScalariform
import scalariform.formatter.preferences._
SbtScalariform.scalariformSettings ++ Seq(
SbtScalariform.ScalariformKeys.preferences := FormattingPreferences()
.setPreference(IndentWithTabs, false)
.setPreference(DoubleIndentClassDeclaration, true)
.setPreference(PreserveDanglingCloseParenthesis, true)
)
}
}
|
Furyu/play-c3p0-plugin
|
project/Build.scala
|
Scala
|
lgpl-2.1
| 3,045 |
package scife
package enumeration
package showcase
import dependent._
import memoization._
import scife.{ enumeration => e }
import scife.util._
import structures._
import BSTrees._
import scife.util.logging._
import scala.language.existentials
import scala.language.implicitConversions
import org.scalatest._
import org.scalatest.prop._
import org.scalameter.api._
class PedagogicalExample extends FunSuite with Matchers with GeneratorDrivenPropertyChecks with HasLogger with ProfileLogger {
import Util.CheckerHelper
import Checks._
// import DSL
import e._
import Enum._
test("Dates example") {
// Java Date constructor is deprecated...
case class Date(y: Int, m: Int, d: Int)
import java.util._
val cal = Calendar.getInstance()
cal.setLenient(false)
// Java calendar months are 0 .. 11
implicit def dateToJavaDate(d: Date) = {
info("Date given: " + d)
cal.set(d.y, d.m - 1, d.d)
cal.getTime
}
def isValid(date: ((Int, Int), Int)) = date match {
case ((d, m), y) =>
try {
cal.set(y, m - 1, d)
cal.getTime
true
} catch {
case _: Exception =>
fine("Invalid date filtered: " + date)
false
}
}
val dates = (1 to 31) β (1 to 12) β Stream.from(2014) β» {
isValid(_)
} β { case ((d, m), y) β new Date(y, m, d) }
val comb = (1 to 31) β (1 to 12) β Stream.from(2014) β» { isValid(_) }
comb shouldBe a [eager.SimpleFilter[_]]
comb(209) should be ((28, 7), 2014)
for (i <- 0 until 400)
info("comb(%d) is %s".format(i, comb(i)))
for (i <- 0 until 400)
withClue((dates(i): java.util.Date).toString) {
noException should be thrownBy
cal.setTime(dates(i))
}
cal.setTime(dates(209))
val scala2014StartDate = cal.getTime
cal.get(Calendar.YEAR) should be (2014)
cal.get(Calendar.DAY_OF_MONTH) should be (28)
// months start from 0
cal.get(Calendar.MONTH) should be (6)
}
// paper
test("allConvenientlyDivisblePairs") {
val allConvenientlyDivisblePairs =
(List((1, 1), (3, 3)) β
((1 to 9) ** (Stream.from(0) filter { _ % 5 == 0 })) map
{ case (x, y) => ("valid", x, y) })
}
test("allConvenientlyDivisblePairs2") {
(Enum((1, 1), (3, 3)): Enum[(Int, Int)]) β (Enum(1 to 9)
product (Stream.from(0) filter { _ % 5 == 0 })) β {
case (x, y) => ("valid", x, y) }
}
test("lazyRecursiveEnumeration") {
// val enum = rec(self, ind) {
// Enum(2, 3) concat
// inmap(ind - 2)self
// }
//
// val res: Enum[(String, ((Int, Int), Int))] =
// ((Enum(1 to 31) ** Enum(1 to 12) ** Enum(Stream.from(2014)) β {
// _ => true; // whether its a good year
// }): Enum[((Int, Int), Int)]) β { case p@((x, y), z) => if (true) ("leap", p) else ("no", p) }
//
//
// val res2 =
// (1 to 31) β (1 to 12) β Stream.from(2014) β {
// case ((d, m), y) => true } β { ("leap", _) }
//
//
// // lazy fibonacci are doable!
// val fibs: Enum[Int] = (Enum(0, 1): Enum[Int])
// val fibs2: Enum[Int] = (Enum(Stream.from(0)).map{ (i: Int) => fibs(i-1) + fibs(i-2) })
// val fib = fibs ++ fibs2
}
}
|
kaptoxic/SciFe
|
src/test/scala/scife/enumeration/showcase/PedagogicalExample.scala
|
Scala
|
gpl-2.0
| 3,347 |
package im.tox.antox.data
import java.util
import java.util.ArrayList
import android.content.{ContentValues, Context}
import android.database.sqlite.{SQLiteDatabase, SQLiteOpenHelper}
//remove if not needed
class UserDB(ctx: Context) extends SQLiteOpenHelper(ctx, "userdb", null, 1) {
private var CREATE_TABLE_USERS: String = "CREATE TABLE IF NOT EXISTS users" + " ( _id integer primary key , " +
"username text," +
"password text," +
"nickname text," +
"status text," +
"status_message text);"
override def onCreate(db: SQLiteDatabase) {
db.execSQL(CREATE_TABLE_USERS)
}
override def onUpgrade(db: SQLiteDatabase, oldVersion: Int, newVersion: Int) {
}
def addUser(username: String, password: String) {
val db = this.getWritableDatabase
val values = new ContentValues()
values.put("username", username)
values.put("password", password)
values.put("nickname", username)
values.put("status", "online")
values.put("status_message", "Hey! I'm using Antox")
db.insert("users", null, values)
db.close()
}
def login(username: String): Boolean = {
val db = this.getReadableDatabase
val cursor = db.rawQuery("SELECT count(*) FROM users WHERE username='" + username +
"'", null)
cursor.moveToFirst()
val count = cursor.getInt(0)
cursor.close()
db.close()
count > 0
}
def getUserDetails(username: String): Array[String] = {
val details = Array.ofDim[String](4)
val db = this.getReadableDatabase
val query = "SELECT * FROM users WHERE username='" + username + "'"
val cursor = db.rawQuery(query, null)
if (cursor.moveToFirst()) {
//WHY WOULD ANY SANE MAN DO THIS
details(0) = cursor.getString(3) //nickname
details(1) = cursor.getString(2) //password
details(2) = cursor.getString(4) //status
details(3) = cursor.getString(5) //status message
}
cursor.close()
db.close()
details
}
def updateUserDetail(username: String, detail: String, newDetail: String) {
val db = this.getReadableDatabase
val query = "UPDATE users SET " + detail + "='" + newDetail + "' WHERE username='" +
username +
"'"
db.execSQL(query)
db.close()
}
def doUsersExist(): Boolean = {
val db = this.getReadableDatabase
val cursor = db.rawQuery("SELECT count(*) FROM users", null)
cursor.moveToFirst()
val count = cursor.getInt(0)
cursor.close()
db.close()
count > 0
}
def getAllProfiles: util.ArrayList[String] = {
val profiles = new util.ArrayList[String]()
val sqLiteDatabase = this.getReadableDatabase
val query = "SELECT username FROM users"
val cursor = sqLiteDatabase.rawQuery(query, null)
if (cursor.moveToFirst()) {
do {
profiles.add(cursor.getString(0))
} while (cursor.moveToNext())
}
profiles
}
}
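// --- Illustrative sketch (added; not part of the original source) ---
// Typical call sequence from an Android component that already holds a Context
// (`context` below is assumed):
//
//   val db = new UserDB(context)
//   if (!db.doUsersExist()) db.addUser("alice", "secret")
//   if (db.login("alice")) {
//     val Array(nickname, password, status, statusMessage) = db.getUserDetails("alice")
//     db.updateUserDetail("alice", "status_message", "Hello from Antox")
//   }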
|
0xPoly/Antox
|
app/src/main/scala/im/tox/antox/data/UserDB.scala
|
Scala
|
gpl-3.0
| 2,876 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rpc
import java.io.IOException
import java.net.{BindException, InetAddress}
import java.util.{List => JList, Map => JMap, Objects, Random, UUID}
import java.util.concurrent.atomic.AtomicBoolean
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.Future
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success, Try}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.{SecurityManager, SerializableWritable, SparkConf}
import org.apache.spark.rpc.netty.NettyRpcEnvFactory
import org.apache.spark.search._
import org.apache.spark.util.ThreadUtils
import org.apache.carbondata.common.annotations.InterfaceAudience
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastore.block.Distributable
import org.apache.carbondata.core.datastore.row.CarbonRow
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.scan.expression.Expression
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.hadoop.CarbonMultiBlockSplit
import org.apache.carbondata.hadoop.api.CarbonInputFormat
import org.apache.carbondata.hadoop.util.CarbonInputFormatUtil
import org.apache.carbondata.processing.util.CarbonLoaderUtil
import org.apache.carbondata.store.worker.Status
/**
* Master of CarbonSearch.
 * It provides a Registry service for workers to register.
 * It also provides a search API to fire RPC calls to workers.
*/
@InterfaceAudience.Internal
class Master(sparkConf: SparkConf) {
private val LOG = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
// worker host address map to EndpointRef
private val random = new Random
private var rpcEnv: RpcEnv = _
private val scheduler: Scheduler = new Scheduler
/** start service and listen on port passed in constructor */
def startService(): Unit = {
if (rpcEnv == null) {
LOG.info("Start search mode master thread")
val isStarted: AtomicBoolean = new AtomicBoolean(false)
new Thread(new Runnable {
override def run(): Unit = {
val hostAddress = InetAddress.getLocalHost.getHostAddress
var port = CarbonProperties.getSearchMasterPort
var exception: BindException = null
          var numTry = 100 // in the worst case we will try to create the service 100 times
do {
try {
LOG.info(s"starting registry-service on $hostAddress:$port")
val config = RpcUtil.getRpcEnvConfig(
sparkConf, "registry-service", hostAddress, "", port,
new SecurityManager(sparkConf), clientMode = false)
rpcEnv = new NettyRpcEnvFactory().create(config)
numTry = 0
} catch {
case e: BindException =>
// port is occupied, increase the port number and try again
exception = e
LOG.error(s"start registry-service failed: ${e.getMessage}")
port = port + 1
numTry = numTry - 1
}
} while (numTry > 0)
if (rpcEnv == null) {
// we have tried many times, but still failed to find an available port
throw exception
}
val registryEndpoint: RpcEndpoint = new Registry(rpcEnv, Master.this)
rpcEnv.setupEndpoint("registry-service", registryEndpoint)
if (isStarted.compareAndSet(false, false)) {
synchronized {
isStarted.compareAndSet(false, true)
}
}
LOG.info("registry-service started")
rpcEnv.awaitTermination()
}
}).start()
var count = 0
val countThreshold = 5000
while (isStarted.compareAndSet(false, false) && count < countThreshold) {
LOG.info(s"Waiting search mode master to start, retrying $count times")
Thread.sleep(10)
count = count + 1;
}
if (count >= countThreshold) {
LOG.error(s"Search mode try $countThreshold times to start master but failed")
throw new RuntimeException(
s"Search mode try $countThreshold times to start master but failed")
} else {
LOG.info("Search mode master started")
}
} else {
LOG.info("Search mode master has already started")
}
}
def stopService(): Unit = {
if (rpcEnv != null) {
rpcEnv.shutdown()
rpcEnv = null
}
}
def stopAllWorkers(): Unit = {
val futures = scheduler.getAllWorkers.toSeq.map { case (address, schedulable) =>
(address, schedulable.ref.ask[ShutdownResponse](ShutdownRequest("user")))
}
futures.foreach { case (address, future) =>
ThreadUtils.awaitResult(future, Duration.apply("10s"))
future.value match {
case Some(result) =>
result match {
case Success(response) => scheduler.removeWorker(address)
case Failure(throwable) => throw new IOException(throwable.getMessage)
}
case None => throw new ExecutionTimeoutException
}
}
}
/** A new searcher is trying to register, add it to the map and connect to this searcher */
def addWorker(request: RegisterWorkerRequest): RegisterWorkerResponse = {
LOG.info(s"Receive Register request from worker ${request.hostAddress}:${request.port} " +
s"with ${request.cores} cores")
val workerId = UUID.randomUUID().toString
val workerAddress = request.hostAddress
val workerPort = request.port
LOG.info(s"connecting to worker ${request.hostAddress}:${request.port}, workerId $workerId")
val endPointRef =
rpcEnv.setupEndpointRef(RpcAddress(workerAddress, workerPort), "search-service")
scheduler.addWorker(workerAddress,
new Schedulable(workerId, workerAddress, workerPort, request.cores, endPointRef))
LOG.info(s"worker ${request.hostAddress}:${request.port} registered")
RegisterWorkerResponse(workerId)
}
/**
* Execute search by firing RPC call to worker, return the result rows
* @param table table to search
* @param columns projection column names
* @param filter filter expression
* @param globalLimit max number of rows required in Master
* @param localLimit max number of rows required in Worker
* @return
*/
def search(table: CarbonTable, columns: Array[String], filter: Expression,
globalLimit: Long, localLimit: Long): Array[CarbonRow] = {
Objects.requireNonNull(table)
Objects.requireNonNull(columns)
if (globalLimit < 0 || localLimit < 0) {
throw new IllegalArgumentException("limit should be positive")
}
val queryId = random.nextInt
var rowCount = 0
val output = new ArrayBuffer[CarbonRow]
def onSuccess(result: SearchResult): Unit = {
// in case of RPC success, collect all rows in response message
if (result.queryId != queryId) {
throw new IOException(
s"queryId in response does not match request: ${result.queryId} != $queryId")
}
if (result.status != Status.SUCCESS.ordinal()) {
throw new IOException(s"failure in worker: ${ result.message }")
}
val itor = result.rows.iterator
while (itor.hasNext && rowCount < globalLimit) {
output += new CarbonRow(itor.next())
rowCount = rowCount + 1
}
LOG.info(s"[SearchId:$queryId] accumulated result size $rowCount")
}
    def onFailure(e: Throwable) = throw new IOException(s"exception in worker: ${ e.getMessage }")
def onTimedout() = throw new ExecutionTimeoutException()
// prune data and get a mapping of worker hostname to list of blocks,
// then add these blocks to the SearchRequest and fire the RPC call
val nodeBlockMapping: JMap[String, JList[Distributable]] = pruneBlock(table, columns, filter)
val tuple = nodeBlockMapping.asScala.map { case (splitAddress, blocks) =>
// Build a SearchRequest
val split = new SerializableWritable[CarbonMultiBlockSplit](
new CarbonMultiBlockSplit(blocks, splitAddress))
val request =
SearchRequest(queryId, split, table.getTableInfo, columns, filter, localLimit)
      // Find an endpoint and send the request to it.
      // This RPC is non-blocking, so we do not need to wait before sending to the next worker.
scheduler.sendRequestAsync[SearchResult](splitAddress, request)
}
// loop to get the result of each Worker
tuple.foreach { case (worker: Schedulable, future: Future[SearchResult]) =>
      // if we have enough data already, we do not need to collect more results
if (rowCount < globalLimit) {
// wait for worker
val timeout = CarbonProperties
.getInstance()
.getProperty(CarbonCommonConstants.CARBON_SEARCH_QUERY_TIMEOUT,
CarbonCommonConstants.CARBON_SEARCH_QUERY_TIMEOUT_DEFAULT)
ThreadUtils.awaitResult(future, Duration.apply(timeout))
LOG.info(s"[SearchId:$queryId] receive search response from worker " +
s"${worker.address}:${worker.port}")
try {
future.value match {
case Some(response: Try[SearchResult]) =>
response match {
case Success(result) => onSuccess(result)
              case Failure(e) => onFailure(e)
}
case None => onTimedout()
}
} finally {
worker.workload.decrementAndGet()
}
}
}
output.toArray
}
/**
* Prune data by using CarbonInputFormat.getSplit
   * Return a mapping of host address to list of blocks
*/
private def pruneBlock(
table: CarbonTable,
columns: Array[String],
filter: Expression): JMap[String, JList[Distributable]] = {
val jobConf = new JobConf(new Configuration)
val job = new Job(jobConf)
val format = CarbonInputFormatUtil.createCarbonTableInputFormat(
job, table, columns, filter, null, null)
// We will do FG pruning in reader side, so don't do it here
CarbonInputFormat.setFgDataMapPruning(job.getConfiguration, false)
val splits = format.getSplits(job)
val distributables = splits.asScala.map { split =>
split.asInstanceOf[Distributable]
}
CarbonLoaderUtil.nodeBlockMapping(
distributables.asJava,
-1,
getWorkers.asJava,
CarbonLoaderUtil.BlockAssignmentStrategy.BLOCK_NUM_FIRST,
null)
}
/** return hostname of all workers */
def getWorkers: Seq[String] = scheduler.getAllWorkers.map(_._1).toSeq
}
// Exception if execution timed out in search mode
class ExecutionTimeoutException extends RuntimeException
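// --- Illustrative lifecycle sketch (added; not part of the original source) ---
// The master is started, workers register through the "registry-service" endpoint,
// searches are fired over RPC, and the workers are shut down. `table` (a CarbonTable)
// and `filter` (an Expression) are assumed to exist.
//
//   val master = new Master(new SparkConf())
//   master.startService()          // waits until the registry endpoint is up (or times out)
//   // ... workers call addWorker(...) via RPC once they come online ...
//   val rows = master.search(table, Array("id", "name"), filter,
//     globalLimit = 100, localLimit = 100)
//   master.stopAllWorkers()
//   master.stopService()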
|
sgururajshetty/carbondata
|
store/search/src/main/scala/org/apache/spark/rpc/Master.scala
|
Scala
|
apache-2.0
| 11,621 |
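The search loop above fires one non-blocking RPC per worker and then drains the futures sequentially until a global row limit is reached. The sketch below reproduces that fan-out/collect shape with plain scala.concurrent primitives; queryWorker, the String rows, and the timeout are illustrative stand-ins, not the CarbonData Master/Worker API.

// Minimal sketch of the fan-out/collect pattern; not the CarbonData API.
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.collection.mutable.ArrayBuffer

object FanOutCollectSketch {
  // Pretend each worker answers with a batch of rows after some work.
  def queryWorker(worker: String, localLimit: Int): Future[Seq[String]] =
    Future { (1 to localLimit).map(i => s"$worker-row$i") }

  def collect(workers: Seq[String], localLimit: Int, globalLimit: Int): Seq[String] = {
    // Fire all requests first (non-blocking), then drain them one by one.
    val futures = workers.map(w => queryWorker(w, localLimit))
    val output = ArrayBuffer.empty[String]
    futures.foreach { future =>
      // Stop waiting once we already have enough rows.
      if (output.size < globalLimit) {
        val rows = Await.result(future, 10.seconds)
        output ++= rows.take(globalLimit - output.size)
      }
    }
    output.toSeq
  }

  def main(args: Array[String]): Unit =
    collect(Seq("w1", "w2", "w3"), localLimit = 4, globalLimit = 6).foreach(println)
}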
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.rdd.RDD
import org.apache.spark.serializer.Serializer
import org.apache.spark.shuffle.hash.HashShuffleManager
import org.apache.spark.shuffle.sort.SortShuffleManager
import org.apache.spark.shuffle.unsafe.UnsafeShuffleManager
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.errors.attachTree
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.util.MutablePair
import org.apache.spark.{HashPartitioner, Partitioner, RangePartitioner, SparkEnv}
/**
* :: DeveloperApi ::
* Performs a shuffle that will result in the desired `newPartitioning`.
*/
@DeveloperApi
case class Exchange(newPartitioning: Partitioning, child: SparkPlan) extends UnaryNode {
override def nodeName: String = if (tungstenMode) "TungstenExchange" else "Exchange"
/**
* Returns true iff we can support the data type, and we are not doing range partitioning.
*/
private lazy val tungstenMode: Boolean = {
unsafeEnabled && codegenEnabled && GenerateUnsafeProjection.canSupport(child.schema) &&
!newPartitioning.isInstanceOf[RangePartitioning]
}
override def outputPartitioning: Partitioning = newPartitioning
override def output: Seq[Attribute] = child.output
// This setting is somewhat counterintuitive:
// If the schema works with UnsafeRow, then we tell the planner that we don't support safe row,
// so the planner inserts a converter to convert data into UnsafeRow if needed.
override def outputsUnsafeRows: Boolean = tungstenMode
override def canProcessSafeRows: Boolean = !tungstenMode
override def canProcessUnsafeRows: Boolean = tungstenMode
/**
* Determines whether records must be defensively copied before being sent to the shuffle.
* Several of Spark's shuffle components will buffer deserialized Java objects in memory. The
* shuffle code assumes that objects are immutable and hence does not perform its own defensive
* copying. In Spark SQL, however, operators' iterators return the same mutable `Row` object. In
* order to properly shuffle the output of these operators, we need to perform our own copying
* prior to sending records to the shuffle. This copying is expensive, so we try to avoid it
* whenever possible. This method encapsulates the logic for choosing when to copy.
*
* In the long run, we might want to push this logic into core's shuffle APIs so that we don't
* have to rely on knowledge of core internals here in SQL.
*
* See SPARK-2967, SPARK-4479, and SPARK-7375 for more discussion of this issue.
*
* @param partitioner the partitioner for the shuffle
* @param serializer the serializer that will be used to write rows
* @return true if rows should be copied before being shuffled, false otherwise
*/
private def needToCopyObjectsBeforeShuffle(
partitioner: Partitioner,
serializer: Serializer): Boolean = {
// Note: even though we only use the partitioner's `numPartitions` field, we require it to be
// passed instead of directly passing the number of partitions in order to guard against
// corner-cases where a partitioner constructed with `numPartitions` partitions may output
// fewer partitions (like RangePartitioner, for example).
val conf = child.sqlContext.sparkContext.conf
val shuffleManager = SparkEnv.get.shuffleManager
val sortBasedShuffleOn = shuffleManager.isInstanceOf[SortShuffleManager] ||
shuffleManager.isInstanceOf[UnsafeShuffleManager]
val bypassMergeThreshold = conf.getInt("spark.shuffle.sort.bypassMergeThreshold", 200)
val serializeMapOutputs = conf.getBoolean("spark.shuffle.sort.serializeMapOutputs", true)
if (sortBasedShuffleOn) {
val bypassIsSupported = SparkEnv.get.shuffleManager.isInstanceOf[SortShuffleManager]
if (bypassIsSupported && partitioner.numPartitions <= bypassMergeThreshold) {
// If we're using the original SortShuffleManager and the number of output partitions is
// sufficiently small, then Spark will fall back to the hash-based shuffle write path, which
// doesn't buffer deserialized records.
// Note that we'll have to remove this case if we fix SPARK-6026 and remove this bypass.
false
} else if (serializeMapOutputs && serializer.supportsRelocationOfSerializedObjects) {
// SPARK-4550 extended sort-based shuffle to serialize individual records prior to sorting
// them. This optimization is guarded by a feature-flag and is only applied in cases where
// shuffle dependency does not specify an aggregator or ordering and the record serializer
// has certain properties. If this optimization is enabled, we can safely avoid the copy.
//
// Exchange never configures its ShuffledRDDs with aggregators or key orderings, so we only
// need to check whether the optimization is enabled and supported by our serializer.
//
// This optimization also applies to UnsafeShuffleManager (added in SPARK-7081).
false
} else {
// Spark's SortShuffleManager uses `ExternalSorter` to buffer records in memory. This code
// path is used both when SortShuffleManager is used and when UnsafeShuffleManager falls
// back to SortShuffleManager to perform a shuffle that the new fast path can't handle. In
// both cases, we must copy.
true
}
} else if (shuffleManager.isInstanceOf[HashShuffleManager]) {
// We're using hash-based shuffle, so we don't need to copy.
false
} else {
// Catch-all case to safely handle any future ShuffleManager implementations.
true
}
}
@transient private lazy val sparkConf = child.sqlContext.sparkContext.getConf
private val serializer: Serializer = {
val rowDataTypes = child.output.map(_.dataType).toArray
if (tungstenMode) {
new UnsafeRowSerializer(child.output.size)
} else {
new SparkSqlSerializer(sparkConf)
}
}
protected override def doExecute(): RDD[InternalRow] = attachTree(this, "execute") {
val rdd = child.execute()
val part: Partitioner = newPartitioning match {
case HashPartitioning(expressions, numPartitions) => new HashPartitioner(numPartitions)
case RangePartitioning(sortingExpressions, numPartitions) =>
// Internally, RangePartitioner runs a job on the RDD that samples keys to compute
// partition bounds. To get accurate samples, we need to copy the mutable keys.
val rddForSampling = rdd.mapPartitions { iter =>
val mutablePair = new MutablePair[InternalRow, Null]()
iter.map(row => mutablePair.update(row.copy(), null))
}
// We need to use an interpreted ordering here because generated orderings cannot be
// serialized and this ordering needs to be created on the driver in order to be passed into
// Spark core code.
implicit val ordering = new InterpretedOrdering(sortingExpressions, child.output)
new RangePartitioner(numPartitions, rddForSampling, ascending = true)
case SinglePartition =>
new Partitioner {
override def numPartitions: Int = 1
override def getPartition(key: Any): Int = 0
}
case _ => sys.error(s"Exchange not implemented for $newPartitioning")
// TODO: Handle BroadcastPartitioning.
}
def getPartitionKeyExtractor(): InternalRow => InternalRow = newPartitioning match {
case HashPartitioning(expressions, _) => newMutableProjection(expressions, child.output)()
case RangePartitioning(_, _) | SinglePartition => identity
case _ => sys.error(s"Exchange not implemented for $newPartitioning")
}
val rddWithPartitionIds: RDD[Product2[Int, InternalRow]] = {
if (needToCopyObjectsBeforeShuffle(part, serializer)) {
rdd.mapPartitions { iter =>
val getPartitionKey = getPartitionKeyExtractor()
iter.map { row => (part.getPartition(getPartitionKey(row)), row.copy()) }
}
} else {
rdd.mapPartitions { iter =>
val getPartitionKey = getPartitionKeyExtractor()
val mutablePair = new MutablePair[Int, InternalRow]()
iter.map { row => mutablePair.update(part.getPartition(getPartitionKey(row)), row) }
}
}
}
new ShuffledRowRDD(rddWithPartitionIds, serializer, part.numPartitions)
}
}
/**
* Ensures that the [[org.apache.spark.sql.catalyst.plans.physical.Partitioning Partitioning]]
* of input data meets the
* [[org.apache.spark.sql.catalyst.plans.physical.Distribution Distribution]] requirements for
* each operator by inserting [[Exchange]] Operators where required. Also ensure that the
* input partition ordering requirements are met.
*/
private[sql] case class EnsureRequirements(sqlContext: SQLContext) extends Rule[SparkPlan] {
// TODO: Determine the number of partitions.
private def numPartitions: Int = sqlContext.conf.numShufflePartitions
/**
* Given a required distribution, returns a partitioning that satisfies that distribution.
*/
private def canonicalPartitioning(requiredDistribution: Distribution): Partitioning = {
requiredDistribution match {
case AllTuples => SinglePartition
case ClusteredDistribution(clustering) => HashPartitioning(clustering, numPartitions)
case OrderedDistribution(ordering) => RangePartitioning(ordering, numPartitions)
case dist => sys.error(s"Do not know how to satisfy distribution $dist")
}
}
private def ensureDistributionAndOrdering(operator: SparkPlan): SparkPlan = {
val requiredChildDistributions: Seq[Distribution] = operator.requiredChildDistribution
val requiredChildOrderings: Seq[Seq[SortOrder]] = operator.requiredChildOrdering
var children: Seq[SparkPlan] = operator.children
assert(requiredChildDistributions.length == children.length)
assert(requiredChildOrderings.length == children.length)
// Ensure that the operator's children satisfy their output distribution requirements:
children = children.zip(requiredChildDistributions).map { case (child, distribution) =>
if (child.outputPartitioning.satisfies(distribution)) {
child
} else {
Exchange(canonicalPartitioning(distribution), child)
}
}
// If the operator has multiple children and specifies child output distributions (e.g. join),
// then the children's output partitionings must be compatible:
if (children.length > 1
&& requiredChildDistributions.toSet != Set(UnspecifiedDistribution)
&& !Partitioning.allCompatible(children.map(_.outputPartitioning))) {
children = children.zip(requiredChildDistributions).map { case (child, distribution) =>
val targetPartitioning = canonicalPartitioning(distribution)
if (child.outputPartitioning.guarantees(targetPartitioning)) {
child
} else {
Exchange(targetPartitioning, child)
}
}
}
// Now that we've performed any necessary shuffles, add sorts to guarantee output orderings:
children = children.zip(requiredChildOrderings).map { case (child, requiredOrdering) =>
if (requiredOrdering.nonEmpty) {
// If child.outputOrdering is [a, b] and requiredOrdering is [a], we do not need to sort.
if (requiredOrdering != child.outputOrdering.take(requiredOrdering.length)) {
sqlContext.planner.BasicOperators.getSortOperator(requiredOrdering, global = false, child)
} else {
child
}
} else {
child
}
}
operator.withNewChildren(children)
}
def apply(plan: SparkPlan): SparkPlan = plan.transformUp {
case operator: SparkPlan => ensureDistributionAndOrdering(operator)
}
}
|
practice-vishnoi/dev-spark-1
|
sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
|
Scala
|
apache-2.0
| 12,906 |
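The copy decision documented in needToCopyObjectsBeforeShuffle reduces to three questions: is the shuffle sort-based, can it bypass merging because there are few partitions, and does the serializer support relocation of serialized objects. The condensed sketch below mirrors that logic over a stand-in ShuffleKind ADT; it is not Spark's ShuffleManager hierarchy and deliberately folds the SortShuffleManager/UnsafeShuffleManager distinction into one case.

// Condensed sketch of the copy decision; ShuffleKind is a stand-in ADT.
sealed trait ShuffleKind
case object SortBased extends ShuffleKind
case object HashBased extends ShuffleKind
case object Unknown extends ShuffleKind

object CopyDecisionSketch {
  def needToCopy(kind: ShuffleKind,
                 numPartitions: Int,
                 bypassMergeThreshold: Int,
                 serializerSupportsRelocation: Boolean): Boolean = kind match {
    // Bypass path and serialized-map-output path avoid buffering
    // deserialized rows, so no defensive copy is needed.
    case SortBased if numPartitions <= bypassMergeThreshold => false
    case SortBased if serializerSupportsRelocation          => false
    // ExternalSorter buffers deserialized rows: must copy.
    case SortBased                                          => true
    // Hash shuffle writes records straight out: no copy.
    case HashBased                                          => false
    // Unknown managers: copy to stay safe.
    case _                                                  => true
  }

  def main(args: Array[String]): Unit = {
    println(needToCopy(SortBased, numPartitions = 10,  bypassMergeThreshold = 200, serializerSupportsRelocation = false)) // false
    println(needToCopy(SortBased, numPartitions = 500, bypassMergeThreshold = 200, serializerSupportsRelocation = false)) // true
  }
}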
package com.sircamp.algorithms.clustering
import org.apache.spark.mllib.clustering.{BisectingKMeans, BisectingKMeansModel, KMeans, KMeansModel}
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.linalg.Vector
/**
* Created by stefano on 29/01/17.
*/
object ClusteringKmeansBuilder {
def buildKmeansPredictionModel(trainingData: RDD[Vector], K: Int, numIterations: Int): KMeansModel = {
KMeans.train(trainingData, K, numIterations)
}
def buildBisectionKmeansPredictionModel(trainingData: RDD[Vector], K: Int, numIterations: Int): BisectingKMeansModel = {
val bkm = new BisectingKMeans().setK(K)
bkm.run(trainingData)
}
}
|
sirCamp/mushrooms-ml-classfier-scala-spark
|
src/main/scala/com/sircamp/algorithms/clustering/ClusteringKmeansBuilder.scala
|
Scala
|
mit
| 655 |
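A possible way to call these builders, assuming a local SparkContext and a handful of dense vectors; the data, master URL and the K/numIterations values are illustrative only.

// Hypothetical usage sketch for ClusteringKmeansBuilder.
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.linalg.Vectors

object ClusteringKmeansBuilderUsage {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("kmeans-sketch").setMaster("local[2]"))
    val trainingData = sc.parallelize(Seq(
      Vectors.dense(0.0, 0.0), Vectors.dense(0.1, 0.1),
      Vectors.dense(9.0, 9.0), Vectors.dense(9.1, 9.2)
    )).cache()

    val model = ClusteringKmeansBuilder.buildKmeansPredictionModel(trainingData, K = 2, numIterations = 20)
    println(model.clusterCenters.mkString(", "))

    val bisecting = ClusteringKmeansBuilder.buildBisectionKmeansPredictionModel(trainingData, K = 2, numIterations = 20)
    println(bisecting.computeCost(trainingData))

    sc.stop()
  }
}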
package com.github.sorhus.webalytics.cruft.model
import com.github.sorhus.webalytics.akka.event._
import com.github.sorhus.webalytics.model._
import com.github.sorhus.webalytics.cruft.redis.RedisMetaDao
import scala.collection.mutable
import scala.concurrent.{ExecutionContext, Future}
trait MetaDao {
def addMeta(bucket: Bucket, element: Element): Future[Any]
def getDocumentId(element_id: ElementId): Long
def getDimensionValues(dimensions: List[Dimension]): List[(Dimension, Set[Value])]
def time[T](name: String)(f: => T): Unit = {
}
}
class DelayedBatchInsertMetaDao(impl: RedisMetaDao)(implicit context: ExecutionContext) extends MetaDao {
var id: Long = 0
val metaBuckets = mutable.Map[Bucket, Element]()
val metaDocumentIds = mutable.Map[String, Long]()
override def addMeta(bucket: Bucket, element: Element) = Future {
metaBuckets.put(bucket, Element.merge(metaBuckets.getOrElse(bucket, Element.fromMap(Map())) :: element :: Nil))
}
def commit() = {
val futures = impl.batchInsertDocumentIds(metaDocumentIds.toMap).toList :::
metaBuckets.toList.map{case(bucket, element) =>
impl.addMeta(bucket, element)
}
Future.sequence(futures)
}
override def getDocumentId(element_id: ElementId): Long = {
id = id + 1L
metaDocumentIds.put(element_id.e, id)
id
}
override def getDimensionValues(dimensions: List[Dimension]): List[(Dimension, Set[Value])] = {
impl.getDimensionValues(dimensions)
}
}
class DevNullMetaDao extends MetaDao {
override def addMeta(bucket: Bucket, element: Element): Future[Any] = Future.successful("")
override def getDocumentId(element_id: ElementId): Long = -1L
override def getDimensionValues(dimensions: List[Dimension]): List[(Dimension, Set[Value])] = Nil
}
|
sorhus/webalytics
|
service/src/main/scala/com/github/sorhus/webalytics/cruft/model/MetaDao.scala
|
Scala
|
gpl-3.0
| 1,786 |
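The delayed-batch idea above (buffer writes in memory, then flush everything in one commit) can be sketched without Redis. DelayedBatchWriter and flushOne below are hypothetical names; only the buffer-then-Future.sequence shape is taken from the code above.

// Generic buffer-then-commit sketch (not the RedisMetaDao API).
import scala.collection.mutable
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

class DelayedBatchWriter[K, V](flushOne: (K, V) => Future[Unit]) {
  private val buffer = mutable.Map.empty[K, V]
  def put(key: K, value: V): Unit = buffer.put(key, value)
  // Flush everything that was buffered, in one batch of futures.
  def commit(): Future[Unit] =
    Future.sequence(buffer.toList.map { case (k, v) => flushOne(k, v) }).map(_ => ())
}

object DelayedBatchWriterDemo {
  def main(args: Array[String]): Unit = {
    val writer = new DelayedBatchWriter[String, Long]((k, v) => Future(println(s"flush $k -> $v")))
    writer.put("doc-a", 1L)
    writer.put("doc-b", 2L)
    Await.result(writer.commit(), 5.seconds)
  }
}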
package blended.security.ssl
import java.math.BigInteger
import java.security.cert.X509Certificate
import blended.testsupport.scalatest.LoggingFreeSpec
import org.scalacheck.Gen
import org.scalatest.matchers.should.Matchers
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
class SelfSignedProviderSpec extends LoggingFreeSpec
with Matchers
with ScalaCheckPropertyChecks
with SecurityTestSupport
with CertificateRequestBuilder
with CertificateSigner {
"The self signed certificate provider should" - {
"create a self signed certificate with the hostname populated signed with it's own key" in {
forAll(Gen.alphaNumStr) { n =>
whenever(n.trim().nonEmpty) {
val holder = createRootCertificate(cn = n).get
val cert : X509Certificate = holder.chain.head
cert.getSerialNumber() should be(new BigInteger("1"))
cert.getIssuerDN().toString should be(cert.getSubjectX500Principal().toString)
holder.chain should have size 1
}
}
}
"update a self signed certificate by maintaining the same key pair and increasing the serial number" in {
val cnProvider : CommonNameProvider = HostnameCNProvider("root")
val provider : CertificateProvider =
new SelfSignedCertificateProvider(selfSignedCfg(cnProvider))
val cert : CertificateHolder = provider.refreshCertificate(None, cnProvider).get
val certNew : CertificateHolder = provider.refreshCertificate(Some(cert), cnProvider).get
val c1 : X509Certificate = cert.chain.head
val c2 : X509Certificate = certNew.chain.head
c1.getIssuerDN() should be(c1.getSubjectDN())
c1.getSerialNumber() should be(new BigInteger("1"))
cert.chain should have size 1
c2.getIssuerDN() should be(c2.getSubjectDN())
c2.getSerialNumber() should be(new BigInteger("2"))
certNew.chain should have size 1
c1.getPublicKey() should equal(c2.getPublicKey())
cert.privateKey should equal(certNew.privateKey)
}
"requires a private key in in the old key to refresh" in {
val cnProvider : CommonNameProvider = HostnameCNProvider("root")
val provider : CertificateProvider =
new SelfSignedCertificateProvider(selfSignedCfg(cnProvider))
val cert : CertificateHolder = provider.refreshCertificate(None, cnProvider).get
val pubOnly : CertificateHolder = cert.copy(privateKey = None)
intercept[NoPrivateKeyException] {
provider.refreshCertificate(Some(pubOnly), cnProvider).get
}
}
}
}
|
woq-blended/blended
|
blended.security.ssl/src/test/scala/blended/security/ssl/SelfSignedProviderSpec.scala
|
Scala
|
apache-2.0
| 2,571 |
package com.avsystem.commons
package redis.commands
import com.avsystem.commons.redis._
import com.avsystem.commons.redis.protocol.{ArrayMsg, ErrorMsg, NullArrayMsg, RedisMsg}
trait TransactionApi extends ApiSubset {
/** Executes [[http://redis.io/commands/watch WATCH]] */
def watch(key: Key, keys: Key*): Result[Unit] =
execute(new Watch(key +:: keys))
/** Executes [[http://redis.io/commands/watch WATCH]]
* or does nothing when `keys` is empty, without sending the command to Redis */
def watch(keys: Iterable[Key]): Result[Unit] =
execute(new Watch(keys))
/** Executes [[http://redis.io/commands/unwatch UNWATCH]] */
def unwatch: Result[Unit] =
execute(Unwatch)
private final class Watch(keys: Iterable[Key]) extends RedisUnitCommand with OperationCommand {
val encoded: Encoded = encoder("WATCH").keys(keys).result
override def updateWatchState(message: RedisMsg, state: WatchState): Unit = message match {
case RedisMsg.Ok => state.watching = true
case _ =>
}
override def immediateResult: Opt[Unit] = whenEmpty(keys, ())
}
private object Unwatch extends RedisUnitCommand with OperationCommand {
val encoded: Encoded = encoder("UNWATCH").result
override def updateWatchState(message: RedisMsg, state: WatchState): Unit = message match {
case RedisMsg.Ok => state.watching = false
case _ =>
}
}
}
private[redis] object Multi extends UnsafeCommand {
val encoded: Encoded = encoder("MULTI").result
}
private[redis] object Exec extends UnsafeCommand {
val encoded: Encoded = encoder("EXEC").result
override def updateWatchState(message: RedisMsg, state: WatchState): Unit = message match {
case _: ArrayMsg[RedisMsg] | NullArrayMsg => state.watching = false
case err: ErrorMsg if err.errorCode == "EXECABORT" => state.watching = false
case _ =>
}
}
private[redis] object Discard extends UnsafeCommand {
val encoded: Encoded = encoder("DISCARD").result
override def updateWatchState(message: RedisMsg, state: WatchState): Unit = message match {
case RedisMsg.Ok => state.watching = false
case _ =>
}
}
|
AVSystem/scala-commons
|
commons-redis/src/main/scala/com/avsystem/commons/redis/commands/transactions.scala
|
Scala
|
mit
| 2,135 |
package forcomp
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import Anagrams._
@RunWith(classOf[JUnitRunner])
class AnagramsSuite extends FunSuite {
test("wordOccurrences: abcd") {
val p = wordOccurrences("abcd");
println(p);
assert(wordOccurrences("abcd") === List(('a', 1), ('b', 1), ('c', 1), ('d', 1)))
}
test("wordOccurrences: Robert") {
assert(wordOccurrences("Robert") === List(('b', 1), ('e', 1), ('o', 1), ('r', 2), ('t', 1)))
}
test("sentenceOccurrences: abcd e") {
assert(sentenceOccurrences(List("abcd", "e")) === List(('a', 1), ('b', 1), ('c', 1), ('d', 1), ('e', 1)))
}
test("dictionaryByOccurrences.get: eat") {
// dictionaryByOccurrences.foreach(println);
// println( "DICTIONARY BY OCC "+dictionaryByOccurrences)
assert(dictionaryByOccurrences.get(List(('a', 1), ('e', 1), ('t', 1))).map(_.toSet) === Some(Set("ate", "eat", "tea")))
}
test("word anagrams: married") {
assert(wordAnagrams("married").toSet === Set("married", "admirer"))
}
test("word anagrams: player") {
assert(wordAnagrams("player").toSet === Set("parley", "pearly", "player", "replay"))
}
test("word anagrams: Elvis") {
println(wordAnagrams("Elvis").toSet)
assert(wordAnagrams("Elvis").toSet === Set("lives", "veils", "evils", "Elvis", "Levis"))
}
test("word anagrams: Easter") {
assert(wordAnagrams("Easter").toSet === Set("easter", "eaters", "Teresa"))
}
test("subtract: lard - r") {
val lard = List(('a', 1), ('d', 1), ('l', 1), ('r', 1))
val r = List(('r', 1))
val lad = List(('a', 1), ('d', 1), ('l', 1))
val sub = subtract(lard, r)
println (sub)
assert(subtract(lard, r) === lad)
}
test("combinations: []") {
assert(combinations(Nil) === List(Nil))
}
test("combinations: abba") {
val abba = List(('a', 2), ('b', 2))
val abbacomb = List(
List(),
List(('a', 1)),
List(('a', 2)),
List(('b', 1)),
List(('a', 1), ('b', 1)),
List(('a', 2), ('b', 1)),
List(('b', 2)),
List(('a', 1), ('b', 2)),
List(('a', 2), ('b', 2))
)
// val comboExpected = combinations(abba)
// println(comboExpected)
// println(comboExpected toSet)
assert(combinations(abba).toSet === abbacomb.toSet)
}
//
//
//
// test("sentence anagrams: []") {
// val sentence = List()
// assert(sentenceAnagrams(sentence) === List(Nil))
// }
//
//// test("sentence anagrams: single") {
//// val sentence = List("x")
//// val anas = List(
//// List("Rex", "Lin", "Zulu"),
//// List("nil", "Zulu", "Rex"),
//// List("Rex", "nil", "Zulu"),
//// List("Zulu", "Rex", "Lin"),
//// List("null", "Uzi", "Rex"),
//// List("Rex", "Zulu", "Lin"),
//// List("Uzi", "null", "Rex"),
//// List("Rex", "null", "Uzi"),
//// List("null", "Rex", "Uzi"),
//// List("Lin", "Rex", "Zulu"),
//// List("nil", "Rex", "Zulu"),
//// List("Rex", "Uzi", "null"),
//// List("Rex", "Zulu", "nil"),
//// List("Zulu", "Rex", "nil"),
//// List("Zulu", "Lin", "Rex"),
//// List("Lin", "Zulu", "Rex"),
//// List("Uzi", "Rex", "null"),
//// List("Zulu", "nil", "Rex"),
//// List("rulez", "Linux"),
//// List("Linux", "rulez")
//// )
//// val p = sentenceAnagrams(sentence).toSet ;
//// println(p)
//// assert(sentenceAnagrams(sentence).toSet === anas.toSet)
//// }
//
test("sentence anagrams: Linux rulez") {
val sentence = List("Linux", "rulez")
val anas = List(
List("Rex", "Lin", "Zulu"),
List("nil", "Zulu", "Rex"),
List("Rex", "nil", "Zulu"),
List("Zulu", "Rex", "Lin"),
List("null", "Uzi", "Rex"),
List("Rex", "Zulu", "Lin"),
List("Uzi", "null", "Rex"),
List("Rex", "null", "Uzi"),
List("null", "Rex", "Uzi"),
List("Lin", "Rex", "Zulu"),
List("nil", "Rex", "Zulu"),
List("Rex", "Uzi", "null"),
List("Rex", "Zulu", "nil"),
List("Zulu", "Rex", "nil"),
List("Zulu", "Lin", "Rex"),
List("Lin", "Zulu", "Rex"),
List("Uzi", "Rex", "null"),
List("Zulu", "nil", "Rex"),
List("rulez", "Linux"),
List("Linux", "rulez")
)
//val p = sentenceAnagrams(sentence).toSet ;
//println(p)
assert(sentenceAnagrams(sentence).toSet === anas.toSet)
}
}
|
samsol/FunProg-scala
|
forcomp/src/test/scala/forcomp/AnagramsSuite.scala
|
Scala
|
agpl-3.0
| 4,398 |
sealed trait WC
case class Stub(chars: String) extends WC
case class Part(lStub: String, words: Int, rStub: String) extends WC
val wcMonoid: Monoid[WC] = new Monoid[WC] {
// The empty result, where we haven't seen any characters yet.
val zero = Stub("")
def op(a: WC, b: WC) = (a, b) match {
case (Stub(c), Stub(d)) => Stub(c + d)
case (Stub(c), Part(l, w, r)) => Part(c + l, w, r)
case (Part(l, w, r), Stub(c)) => Part(l, w, r + c)
case (Part(l1, w1, r1), Part(l2, w2, r2)) =>
Part(l1, w1 + (if ((r1 + l2).isEmpty) 0 else 1) + w2, r2)
}
}
|
ShokuninSan/fpinscala
|
answerkey/monoids/11.answer.scala
|
Scala
|
mit
| 573 |
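A worked example of how this monoid counts words: fold the characters of a string into WC values and combine them with op, then turn the leftover stubs at each end into at most one extra word. The tiny Monoid trait restates the book's interface so the sketch stands alone; toWC and countWords are illustrative helpers, not part of the answer key.

// Self-contained sketch: the Monoid trait here is an assumption restating
// the exercise's interface so the example compiles on its own.
trait Monoid[A] {
  def op(a: A, b: A): A
  def zero: A
}

object WcDemo {
  // A whitespace character splits words; anything else extends a stub.
  def toWC(c: Char): WC =
    if (c.isWhitespace) Part("", 0, "") else Stub(c.toString)

  def countWords(s: String)(m: Monoid[WC]): Int = {
    def unstub(str: String) = if (str.isEmpty) 0 else 1
    s.foldLeft(m.zero)((acc, c) => m.op(acc, toWC(c))) match {
      case Stub(chars)   => unstub(chars)
      case Part(l, w, r) => unstub(l) + w + unstub(r)
    }
  }

  def main(args: Array[String]): Unit =
    println(countWords("lorem ipsum dolor sit amet")(wcMonoid)) // 5
}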
/**
* Copyright (C) 2016 Verizon. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import com.typesafe.config.Config
import com.verizon.bda.trapezium.framework.ApplicationManager
import com.verizon.bda.trapezium.framework.manager.ApplicationConfig
import org.apache.spark.SparkConf
import org.slf4j.LoggerFactory
/**
* Created by Pankaj on 6/3/16.
*/
object TestUtils {
val logger = LoggerFactory.getLogger(this.getClass)
def getSparkConf(appConfig: ApplicationConfig = ApplicationManager.getConfig()): SparkConf = {
val sparkConfigParam: Config = appConfig.sparkConfParam
val sparkConf = new SparkConf
sparkConf.setAppName(appConfig.appName)
if (!sparkConfigParam.isEmpty) {
val keyValueItr = sparkConfigParam.entrySet().iterator()
while (keyValueItr.hasNext) {
val sparkConfParam = keyValueItr.next()
sparkConf.set(sparkConfParam.getKey, sparkConfParam.getValue.unwrapped().toString)
logger.info(s"${sparkConfParam.getKey}: ${sparkConfParam.getValue.unwrapped().toString}")
}
}
if (appConfig.env == "local") sparkConf.setMaster("local[2]")
sparkConf
}
}
|
Verizon/trapezium
|
framework/src/test/scala/org/apache/spark/util/TestUtils.scala
|
Scala
|
apache-2.0
| 1,688 |
package org.akoshterek.backgammon.nn.encog.activation
import org.encog.engine.network.activation.ActivationFunction
import org.encog.util.obj.ActivationUtil
/**
* Created by Alex on 20-05-17.
*/
@SerialVersionUID(1L)
class ActivationLeakingRelu(thresholdHigh: Double, thresholdLow: Double, high: Double, low: Double, leak: Double) extends ActivationFunction {
private val params = Array[Double] (thresholdHigh, thresholdLow, high, low, leak)
require(leak >= 0 && leak <= 1, "Leak must be in [0, 1] range")
val PARAM_RAMP_HIGH_THRESHOLD = 0
val PARAM_RAMP_LOW_THRESHOLD = 1
val PARAM_RAMP_HIGH = 2
val PARAM_RAMP_LOW = 3
val PARAM_RAMP_LEAK = 4
private def slope: Double = (params(PARAM_RAMP_HIGH_THRESHOLD) - params(PARAM_RAMP_LOW_THRESHOLD)) / (params(PARAM_RAMP_HIGH) - params(PARAM_RAMP_LOW))
def this() = {
this(1.0D, -1.0D, 1.0D, -1.0D, 0.01)
}
override def activationFunction(x: Array[Double], start: Int, size: Int): Unit = {
var i = start
while (i < start + size) {
val v = x(i)
if (v < params(PARAM_RAMP_LOW_THRESHOLD)) {
x(i) = params(PARAM_RAMP_LOW)
}
else if (v > params(PARAM_RAMP_HIGH_THRESHOLD)) {
x(i) = params(PARAM_RAMP_HIGH)
}
else {
x(i) = if (v >= 0) slope * v else params(PARAM_RAMP_LEAK) * v
}
i += 1
}
}
override def clone = new ActivationLeakingRelu(
params(PARAM_RAMP_HIGH_THRESHOLD),
params(PARAM_RAMP_LOW_THRESHOLD),
params(PARAM_RAMP_HIGH),
params(PARAM_RAMP_LOW),
params(PARAM_RAMP_LEAK)
)
override def derivativeFunction(b: Double, a: Double): Double = {
if (b >= 0)
slope
else
params(PARAM_RAMP_LEAK)
}
override def getParamNames: Array[String] = {
Array[String]("thresholdHigh", "thresholdLow", "high", "low", "leak")
}
override def getParams: Array[Double] = params
override def hasDerivative = true
override def setParam(index: Int, value: Double): Unit = {
params(index) = value
}
override def getFactoryCode: String = ActivationUtil.generateActivationFactory("lrelu", this)
}
|
akoshterek/MultiGammonJava
|
multi-gammon-core/src/main/java/org/akoshterek/backgammon/nn/encog/activation/ActivationLeakingRelu.scala
|
Scala
|
gpl-3.0
| 2,113 |
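With the default constructor parameters the activation reduces to: clip to [low, high] outside the thresholds, slope*x for non-negative inputs in range, and leak*x for negative ones. The standalone sketch below replays that piecewise rule on a few sample inputs; it is a plain-Scala illustration, not the Encog ActivationFunction API.

// Standalone numeric sketch of the piecewise activation above, using the
// default constructor values (1.0, -1.0, 1.0, -1.0, 0.01).
object LeakingReluSketch {
  val (thresholdHigh, thresholdLow, high, low, leak) = (1.0, -1.0, 1.0, -1.0, 0.01)
  val slope: Double = (thresholdHigh - thresholdLow) / (high - low) // 1.0 with the defaults

  def activate(x: Double): Double =
    if (x < thresholdLow) low            // clip below
    else if (x > thresholdHigh) high     // clip above
    else if (x >= 0) slope * x           // ramp for non-negative inputs
    else leak * x                        // leaky branch for negative inputs

  def main(args: Array[String]): Unit =
    Seq(-2.0, -0.5, 0.0, 0.5, 2.0).foreach(x => println(s"f($x) = ${activate(x)}"))
  // f(-2.0) = -1.0, f(-0.5) = -0.005, f(0.0) = 0.0, f(0.5) = 0.5, f(2.0) = 1.0
}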
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.keras.nn
import com.intel.analytics.bigdl.keras.KerasBaseSpec
import com.intel.analytics.bigdl.dllib.nn.abstractnn.{AbstractModule, DataFormat}
import com.intel.analytics.bigdl.dllib.nn.internal.{MaxPooling2D, Sequential => KSequential}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.Shape
import com.intel.analytics.bigdl.dllib.utils.serializer.ModuleSerializationTest
import scala.util.Random
class MaxPooling2DSpec extends KerasBaseSpec{
"MaxPooling2D NCHW" should "be the same as Keras" in {
val kerasCode =
"""
|input_tensor = Input(shape=[3, 24, 24])
|input = np.random.random([2, 3, 24, 24])
|output_tensor = MaxPooling2D(dim_ordering="th")(input_tensor)
|model = Model(input=input_tensor, output=output_tensor)
""".stripMargin
val seq = KSequential[Float]()
val layer = MaxPooling2D[Float](inputShape = Shape(3, 24, 24))
seq.add(layer)
checkOutputAndGrad(seq.asInstanceOf[AbstractModule[Tensor[Float], Tensor[Float], Float]],
kerasCode)
}
"MaxPooling2D NHWC" should "be the same as Keras" in {
val kerasCode =
"""
|input_tensor = Input(shape=[32, 28, 5])
|input = np.random.random([3, 32, 28, 5])
|output_tensor = MaxPooling2D(pool_size=(2, 3), strides=(1, 2),
| dim_ordering="tf")(input_tensor)
|model = Model(input=input_tensor, output=output_tensor)
""".stripMargin
val seq = KSequential[Float]()
val layer = MaxPooling2D[Float](poolSize = (2, 3), strides = (1, 2),
dimOrdering = "tf", inputShape = Shape(32, 28, 5))
seq.add(layer)
checkOutputAndGrad(seq.asInstanceOf[AbstractModule[Tensor[Float], Tensor[Float], Float]],
kerasCode)
}
"MaxPooling2D same border mode" should "be the same as Keras" in {
val kerasCode =
"""
|input_tensor = Input(shape=[3, 24, 24])
|input = np.random.random([2, 3, 24, 24])
|output_tensor = MaxPooling2D(strides=(1, 2), border_mode="same",
| dim_ordering="th")(input_tensor)
|model = Model(input=input_tensor, output=output_tensor)
""".stripMargin
val seq = KSequential[Float]()
val layer = MaxPooling2D[Float](strides = (1, 2), borderMode = "same",
inputShape = Shape(3, 24, 24))
seq.add(layer)
checkOutputAndGrad(seq.asInstanceOf[AbstractModule[Tensor[Float], Tensor[Float], Float]],
kerasCode)
}
}
class MaxPooling2DSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val layer = MaxPooling2D[Float](inputShape = Shape(3, 24, 24))
layer.build(Shape(2, 3, 24, 24))
val input = Tensor[Float](2, 3, 24, 24).apply1(_ => Random.nextFloat())
runSerializationTest(layer, input)
}
}
|
intel-analytics/BigDL
|
scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/keras/nn/MaxPooling2DSpec.scala
|
Scala
|
apache-2.0
| 3,465 |
package effectful.impl
import effectful._
import cats._
import scala.collection.generic.CanBuildFrom
object EffectIteratorOps {
def collect[E[_],A,M[_]](ea: EffectIterator[E,A])(implicit cbf:CanBuildFrom[Nothing,A,M[A]]) : E[M[A]] = {
import ea._
import Monad.ops._
val builder = cbf()
for {
initial <- initialize()
result <- {
def loop(state: S): E[M[A]] = {
next(state).flatMap {
case Some((s,a)) =>
builder += a
loop(s)
case None =>
E.pure(builder.result())
}
}
loop(initial)
}
} yield result
}
def empty[E[_],A](implicit E: Monad[E]) : EffectIterator[E,A] = {
val _E = E
new EffectIterator[E,A] {
override implicit val E = _E
type S = Unit
val none : E[Option[(Unit,A)]] = E.pure(None)
def next(s: S) = none
def initialize() = E.pure(())
}
}
def apply[E[_],SS,A](
initialize: () => E[SS]
)(
next: SS => E[Option[(SS,A)]]
)(implicit
E: Monad[E]
) : EffectIterator[E,A] = {
// import Monad.ops._
val _E = E
val _next = next
val _initialize = initialize
new EffectIterator[E,A] {
override implicit val E = _E
type S = SS
def initialize() = _initialize()
def next(s: S) = _next(s)
}
}
def computed[E[_],A](a: A*)(implicit E: Monad[E]) : EffectIterator[E,A] =
FromIterator(a.iterator)
// def flatten[E[_],A](
// eia: E[EffectIterator[E,A]]
// )(implicit E: Monad[E]) : EffectIterator[E,A] =
// Flatten[E,A](eia)
case class Map[E[_],A,B](
base: EffectIterator[E,A],
f: A => B
)(implicit
val E: Monad[E]
) extends EffectIterator[E,B] {
type S = base.S
override def initialize(): E[S] = base.initialize()
override def next(s: S): E[Option[(S,B)]] = {
import Monad.ops._
base.next(s).map (_.map { case (nextS,a) => (nextS,f(a)) })
}
}
// case class FlatMap[E[_],A,B](
// base: EffectIterator[E,A],
// f: A => EffectIterator[E,B]
// )(implicit
// val E: Monad[E]
// ) extends EffectIterator[E,B] {
// import Monad.ops._
//
// type S1 = base.S
// case class Inner(
// ib: EffectIterator[E,B]
// )(
// val s: ib.S
// ) {
// def next() : E[Option[(ib.S,B)]] =
// ib.next(s)
// }
//
// case class S(
// s1: S1,
// optInner: Option[Inner]
// )
//
// override def initialize(): E[S] =
// base.initialize().map(S(_,None))
//
// override def next(s: S): E[Option[(S,B)]] = {
// s match {
// case S(s1,None) =>
// base.next(s1).flatMap {
// case Some((nextS1,a)) =>
// val ib = f(a)
// for {
// state <- ib.initialize()
// result <- next(S(s1,Some(Inner(ib)(state))))
// } yield result
// case None =>
// E.pure(None)
// }
// case S(s1,Some(inner@Inner(ib))) =>
// inner.next().flatMap {
// case Some((nextInnerS,b)) =>
// E.pure(Some((S(s1,Some(Inner(ib)(nextInnerS))),b)))
// case None =>
// next(S(s1,None))
// }
// }
// }
// }
case class Append[E[_],A](
first: EffectIterator[E,A],
second: EffectIterator[E,A]
)(implicit
val E: Monad[E]
) extends EffectIterator[E,A] {
import Monad.ops._
type S1 = first.S
type S2 = second.S
type S = (S1,Option[S2])
override def initialize(): E[(S1, Option[S2])] =
first.initialize().map((_,None))
override def next(s: S): E[Option[(S,A)]] =
s match {
case (s1,None) =>
first.next(s1).flatMap {
case None =>
for {
s2 <- second.initialize()
result <- next((s1,Some(s2)))
} yield result
case Some((nextS1,a)) =>
E.pure(Some(((nextS1,None),a)))
}
case (s1,Some(s2)) =>
second.next(s2).map {
case None => None
case Some((nextS2,a)) =>
Some(((s1,Some(nextS2)),a))
}
}
}
case class FromIterator[E[_],A](
values: Iterator[A]
)(implicit
val E: Monad[E]
) extends EffectIterator[E,A] {
type S = Iterator[A]
def initialize() = E.pure(values)
override def next(s: S): E[Option[(S,A)]] =
if(s.hasNext) {
E.pure(Some((s,s.next())))
} else {
E.pure(None)
}
}
// case class Flatten[E[_],A](
// eia: E[EffectIterator[E,A]]
// )(implicit
// val E: Monad[E]
// ) extends EffectIterator[E,A] {
// import Monad.ops._
//
// type S = eia
//
// def next(s: Flatten.this.type) = ???
//
// def initialize() = eia.flatMap(_.initialize())
// }
}
|
lancegatlin/effectful-demo
|
src/main/scala/effectful/impl/EffectIteratorOps.scala
|
Scala
|
mit
| 4,835 |
package examples.custom_command
import com.coldcore.akkaftp.ftp.core.{FtpState, Session}
import com.coldcore.akkaftp.ftp.command.{DefaultCommandFactory, Reply, Command}
import java.text.SimpleDateFormat
import java.util.Date
import akka.actor.ActorSystem
import com.coldcore.akkaftp.{Settings, Launcher}
case class XTimeCommand(param: String, session: Session) extends Command {
override def exec: Reply = {
val sdf = new SimpleDateFormat(if (param.isEmpty) "dd/MM/yyyy HH:mm:ss" else param)
val date = sdf.format(new Date)
Reply(200, s"Server time $date")
}
}
class CustomCommandFactory extends DefaultCommandFactory {
def mycmd(name: String, param: String, session: Session): Option[Command] =
Option(name match {
case "X-TIME" => XTimeCommand(param, session)
case _ => null
})
override def cmd(name: String, param: String, session: Session): Command =
mycmd(name, param, session) getOrElse super.cmd(name, param, session)
}
class CustomFtpState(override val system: ActorSystem,
override val hostname: String,
override val port: Int,
override val guest: Boolean,
override val usersdir: String,
override val externalIp: String,
override val pasvPorts: Seq[Int]) extends
FtpState(system, hostname, port, guest, usersdir, externalIp, pasvPorts) {
override val commandFactory = new CustomCommandFactory
}
class CustomLauncher extends Launcher {
override def createFtpState(system: ActorSystem): FtpState = {
val hostname = Settings(system).hostname
val port = Settings(system).port
val guest = Settings(system).guest
val homedir = Settings(system).homedir
val externalIp = Settings(system).externalIp
val pasvPorts = Settings(system).pasvPorts
new CustomFtpState(system, hostname, port, guest, homedir, externalIp, pasvPorts)
}
}
object main {
def main(args: Array[String]) {
new CustomLauncher().start()
}
}
// sbt "run-main examples.custom_command.main"
|
2nolife/akka-ftp
|
examples/custom-command/scala/custom-command.scala
|
Scala
|
lgpl-3.0
| 2,139 |
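Adding a second verb follows the same shape as X-TIME: a case class extending Command whose exec returns a Reply, plus a factory hook. X-ECHO below is a made-up command; only the Command, Reply and Session signatures visible above are assumed, and wiring it into a launcher would mirror CustomLauncher.

// Sketch of a second custom command in the same style; X-ECHO is hypothetical.
package examples.custom_command

import com.coldcore.akkaftp.ftp.core.Session
import com.coldcore.akkaftp.ftp.command.{Command, Reply}

case class XEchoCommand(param: String, session: Session) extends Command {
  override def exec: Reply =
    if (param.isEmpty) Reply(501, "Nothing to echo")
    else Reply(200, s"Echo: $param")
}

class EchoCommandFactory extends CustomCommandFactory {
  // Handle X-ECHO first, then fall back to X-TIME and the default commands.
  override def mycmd(name: String, param: String, session: Session): Option[Command] =
    name match {
      case "X-ECHO" => Some(XEchoCommand(param, session))
      case _        => super.mycmd(name, param, session)
    }
}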
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.zstore
import java.nio.ByteBuffer
import cmwell.driver.{Dao, DaoExecution}
import cmwell.util.concurrent._
import cmwell.util.numeric._
import com.datastax.driver.core.utils.Bytes
import com.datastax.driver.core._
import com.typesafe.config.ConfigFactory
import com.typesafe.scalalogging.LazyLogging
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}
object ZStore {
def apply(dao: Dao): ZStore = new ZStoreImpl(dao)
}
// Design choice: TTL is expressed as (seconds: Int) rather than a Duration, because Cassandra's USING TTL only accepts an INT number of seconds.
trait ZStore {
def put(uzid: String, value: Array[Byte], batched: Boolean = false): Future[Unit]
def put(uzid: String, value: Array[Byte], secondsToLive: Int, batched: Boolean): Future[Unit] // not using FiniteDuration, since CAS only supports INT values for TTL
def get(uzid: String): Future[Array[Byte]]
def get(uzid: String, dontRetry: Boolean): Future[Array[Byte]]
def getOpt(uzid: String, dontRetry: Boolean = false): Future[Option[Array[Byte]]]
def putString(uzid:String, value:String, batched:Boolean = false):Future[Unit] = {
put(uzid, value.getBytes("utf-8"), batched)
}
def putLong(uzid: String, value: Long, batched: Boolean = false): Future[Unit] =
put(uzid, ByteBuffer.allocate(8).putLong(value).array(), batched)
def putInt(uzid: String, value: Int, batched: Boolean = false): Future[Unit] = put(uzid, ByteBuffer.allocate(4).putInt(value).array(), batched)
def putBoolean(uzid: String, value: Boolean, batched: Boolean = false): Future[Unit] = {
val i:Int = if(value) 1 else 0
put(uzid, ByteBuffer.allocate(1).put(i.toByte).array(), batched)
}
def getString(uzid:String):Future[String] = get(uzid).map{new String(_)}
def getStringOpt(uzid:String):Future[Option[String]] = getOpt(uzid).map{ _.map{new String(_)}}
def getLong(uzid: String): Future[Long] = get(uzid).map{ bytes =>
ByteBuffer.wrap(bytes).getLong
}
def getInt(uzid: String): Future[Int] = get(uzid).map{ bytes =>
ByteBuffer.wrap(bytes).getInt
}
def getBoolean(uzid: String): Future[Boolean] = get(uzid).map{ bytes =>
if(bytes(0).toInt == 1)
true
else
false
}
def getLongOpt(uzid: String): Future[Option[Long]] = getOpt(uzid, true).map{ bytesOpt =>
bytesOpt.map{ ByteBuffer.wrap(_).getLong}
}
def getIntOpt(uzid: String): Future[Option[Int]] = getOpt(uzid, true).map{ bytesOpt =>
bytesOpt.map{ ByteBuffer.wrap(_).getInt}
}
def getBooleanOpt(uzid: String): Future[Option[Boolean]] = getOpt(uzid, true).map{ bytesOpt =>
bytesOpt.map{ bytes => if(bytes(0) == 1) true else false}
}
def remove(uzid: String): Future[Unit]
}
class ZStoreImpl(dao: Dao) extends ZStore with DaoExecution with LazyLogging {
implicit val daoProxy = dao
override def put(uzid: String, value: Array[Byte], batched: Boolean): Future[Unit] = put(uzid, value, neverExpire, batched)
override def put(uzid: String, value: Array[Byte], secondsToLive: Int, batched: Boolean): Future[Unit] = {
def serialize(obj: ZStoreObj): Vector[ZStoreRow] = {
// NOTE: "adler" < "bytes" < "data"
val adlerRow = ZStoreRow(obj.uzid, "adler", toIntegerBytes(obj.adler32))
val bytesRow = ZStoreRow(obj.uzid, "bytes", toLongBytes(obj.value.length.toLong))
val indexLength = (((obj.value.length - 1) / chunkSize) + 1).toString.length
val dataRows = obj.value.grouped(chunkSize).zipWithIndex.map {
case (chunk, index) => ZStoreRow(obj.uzid, s"data%0${indexLength}d".format(index), chunk)
}
Vector(adlerRow, bytesRow) ++ dataRows
}
val ttl = new java.lang.Integer(secondsToLive)
val adler32 = cmwell.util.string.Hash.adler32int(value)
val rows = serialize(ZStoreObj(uzid, adler32, value))
def rowToStatement(row: ZStoreRow) = putPStmt.bind(row.uzid, row.field, ByteBuffer.wrap(row.chunk), ttl).
setConsistencyLevel(ConsistencyLevel.QUORUM)
if(batched) {
val batch = new BatchStatement()
rows.foreach { row =>
batch.add(putPStmt.bind(row.uzid, row.field, ByteBuffer.wrap(row.chunk), ttl))
}
execWithRetry(batch.setConsistencyLevel(ConsistencyLevel.QUORUM)).map(_ => ())
}
else cmwell.util.concurrent.travector(rows)(rowToStatement _ andThen execWithRetry).map(_ => ())
}
override def get(uzid: String): Future[Array[Byte]] = {
val getStmtOne = getPStmt.bind(uzid).setConsistencyLevel(ConsistencyLevel.ONE)
val getStmtQuorum = getPStmt.bind(uzid).setConsistencyLevel(ConsistencyLevel.QUORUM)
def fetchAndDeserialize(stmt: Statement) =
retry(exec(stmt).map(deserialize(uzid) _ andThen (_.fold(throw new NoSuchElementException)(_.value))))
fetchAndDeserialize(getStmtOne).andThen {
case Success(value) => value
case Failure(e) =>
logger.warn(s"[zStore] Reading uzid $uzid with cl=ONE threw $e. Trying cl=QUORUM...")
fetchAndDeserialize(getStmtQuorum).andThen {
case Success(value) => value
case Failure(e1) =>
logger.warn(s"[zStore] Reading uzid $uzid with cl=QUORUM threw $e1. Trying last time with cl=ONE...")
fetchAndDeserialize(getStmtOne)
}
}
}
override def get(uzid: String, dontRetry: Boolean): Future[Array[Byte]] = {
if (!dontRetry)
get(uzid)
else {
val stmt = getPStmt.bind(uzid).setConsistencyLevel(ConsistencyLevel.ONE)
exec(stmt).map(deserialize(uzid) _ andThen (_.fold(throw new NoSuchElementException)(_.value)))
}
}
override def getOpt(uzid: String, dontRetry: Boolean): Future[Option[Array[Byte]]] = {
require(dontRetry, "getOpt only works in dontRetry mode") // might be implemented, but YAGNI
val stmt = getPStmt.bind(uzid).setConsistencyLevel(ConsistencyLevel.ONE)
exec(stmt).map(deserialize(uzid)(_)).map(_.map(_.value)) // not throwing an exception, but returning an Option
}
override def remove(uzid: String): Future[Unit] = {
val delStmt = delPStmt.bind(uzid).setConsistencyLevel(ConsistencyLevel.QUORUM)
execWithRetry(delStmt).map(_ => ())
}
//TODO: 8 retries + delay factor of 2, means you could retry for 50+100+200+400+800+1600+3200+6400 = 12750ms
//TODO: this change is mandatory because the new driver backpressures with reject exceptions.
//TODO: we need to propagate back pressure instead of retrying for so long...
private def execWithRetry(stmt: Statement) = cmwell.util.concurrent.retry(8, 50.millis, 2)(exec(stmt))
private def exec(stmt: Statement) = executeAsyncInternal(stmt)
private def retry[T](task: Future[T]) = cmwell.util.concurrent.retry(3, 50.millis, 2)(task)
private val neverExpire = 0
private val chunkSize = ConfigFactory.load().getInt("cmwell.zstore.chunkSize")
// zStore deliberately contains its setup and not in a resource cql file.
private val createTableCqlStmt = {
// assuming data2 already exists from cmwell install
val createTableCql =
"""|
|CREATE TABLE IF NOT EXISTS data2.zstore (
| uzid text,
| field text,
| value blob,
| PRIMARY KEY (uzid, field)
|) WITH CLUSTERING ORDER BY (field ASC)
|AND compression = {'sstable_compression': 'LZ4Compressor'}
|AND caching = '{"keys":"ALL", "rows_per_partition":"ALL"}' ;
""".stripMargin
prepare(createTableCql).bind
}
dao.getSession.execute(createTableCqlStmt) // deliberately not using execAsync
private val putPStmt = prepare("INSERT INTO data2.zstore (uzid,field,value) VALUES(?,?,?) USING TTL ?") // TTL 0 == persist forever
private val getPStmt = prepare("SELECT * FROM data2.zstore WHERE uzid = ?")
private val delPStmt = prepare("DELETE FROM data2.zstore WHERE uzid = ?")
private def deserialize(uzid: String)(result: ResultSet): Option[ZStoreObj] = {
if (result.isExhausted) None
else {
val adlerRow = result.one()
require(!result.isExhausted, "cassandra ResultSet exhausted after adler row")
val adler = ByteBuffer.wrap(Bytes.getArray(adlerRow.getBytes("value"))).getInt
val bytesRow = result.one()
val bytes = ByteBuffer.wrap(Bytes.getArray(bytesRow.getBytes("value"))).getLong
if (result.isExhausted) {
require(bytes == 0 && adler == ZStoreImpl.zeroAdler, s"expected empty content for uzid [$uzid] with bytes [$bytes] and adler [$adler]")
Some(ZStoreObj(uzid, adler, Array.emptyByteArray))
} else {
val it = new Iterator[Array[Byte]] {
override def hasNext: Boolean = !result.isExhausted
override def next(): Array[Byte] = {
val row: Row = result.one()
Bytes.getArray(row.getBytes("value"))
}
}
val valueBuilder = Array.newBuilder[Byte]
valueBuilder.sizeHint(longToIntOrMaxInt(bytes))
it.foreach(valueBuilder ++= _)
val obj = ZStoreObj(uzid, adler, valueBuilder.result())
if (!obj.isCorrect) {
logger.error(s"[zStore] Reading uzid [$uzid] failed because data corruption! (stored adler[$adler] != computed adler[${obj.adlerizedValue}], stored bytes[$bytes], actual value size[${obj.value.length}])")
// throw new RuntimeException("Corrupted data!")
}
Some(obj)
}
}
}
private def longToIntOrMaxInt(long: Long) = math.min(Int.MaxValue, long).toInt
}
object ZStoreImpl {
val zeroAdler = cmwell.util.string.Hash.adler32int(Array.emptyByteArray)
}
case class ZStoreRow(uzid: String, field: String, chunk: Array[Byte])
case class ZStoreObj(uzid: String, adler32: Int, value: Array[Byte]) {
lazy val adlerizedValue = cmwell.util.string.Hash.adler32int(value)
lazy val isCorrect: Boolean = adler32 == adlerizedValue
}
/**
* an In Memory implementation of the ZStore trait
*/
class ZStoreMem extends ZStore {
private var store = Map.empty[String, Array[Byte]]
override def put(uzid: String, value: Array[Byte], batched: Boolean): Future[Unit] =
Future.successful(store += uzid -> value)
override def put(uzid: String, value: Array[Byte], secondsToLive: Int, batched: Boolean): Future[Unit] = Future.successful {
store += uzid -> value
delayedTask(secondsToLive.seconds) { store -= uzid }
}
override def get(uzid: String): Future[Array[Byte]] =
Future(store(uzid))
override def get(uzid: String, dontRetry: Boolean): Future[Array[Byte]] = get(uzid)
override def getOpt(uzid: String, dontRetry: Boolean): Future[Option[Array[Byte]]] =
Future.successful(store.get(uzid))
override def remove(uzid: String): Future[Unit] =
Future.successful(store -= uzid)
def keySet = store.keySet
}
|
nruppin/CM-Well
|
server/cmwell-zstore/src/main/scala/cmwell/zstore/ZStore.scala
|
Scala
|
apache-2.0
| 11,330 |
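A quick way to exercise the ZStore trait is against the in-memory ZStoreMem defined above; the keys, values and timeouts in this sketch are arbitrary.

// Usage sketch against the in-memory implementation above.
import scala.concurrent.Await
import scala.concurrent.duration._

object ZStoreMemDemo {
  def main(args: Array[String]): Unit = {
    val zStore = new ZStoreMem
    Await.result(zStore.putString("greeting", "hello zstore"), 5.seconds)
    Await.result(zStore.putLong("counter", 42L, batched = true), 5.seconds)

    println(Await.result(zStore.getStringOpt("greeting"), 5.seconds)) // Some(hello zstore)
    println(Await.result(zStore.getLong("counter"), 5.seconds))       // 42

    Await.result(zStore.remove("greeting"), 5.seconds)
    println(Await.result(zStore.getStringOpt("greeting"), 5.seconds)) // None
  }
}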
object Test {
"" match {
case Unapply(a, b) =>
a: Int
b: String
case UnapplySeq(a, b1, b2) =>
a: Int
b1: String
b2: String
}
}
// These used to fail `too many patterns` under -Ymacro-expand:discard
|
loskutov/intellij-scala
|
testdata/scalacTests/macros/t8934a/Test_2.scala
|
Scala
|
apache-2.0
| 240 |
package com.geeksville.mavlink
import java.net._
import org.mavlink.messages.MAVLinkMessage
import org.mavlink.messages.MAVLinkMessageFactory
import org.mavlink.IMAVLinkMessage
import com.geeksville.util.ThreadTools
import com.geeksville.akka.InstrumentedActor
import akka.actor.PoisonPill
/**
* published on our eventbus when someone wants a packet sent to the outside world
*/
// case class MavlinkSend(message: MAVLinkMessage)
/**
* Receive UDPMavlink messages and forward to actors
* Use with mavproxy like so:
* Following instructions are stale...
* mavproxy.py --master=/dev/ttyACM0 --master localhost:51200 --out=localhost:51232
*
* FIXME - make sure we don't overrun the rate packets can be read
*/
class MavlinkUDP(destHostName: Option[String] = None,
val destPortNumber: Option[Int] = None,
val localPortNumber: Option[Int] = None) extends MavlinkSender with MavlinkReceiver {
// These must be lazy - to ensure we don't do networking in the main thread (an android restriction)
lazy val serverHost = InetAddress.getByName(destHostName.get)
lazy val socket = localPortNumber.map { n => new DatagramSocket(n) }.getOrElse(new DatagramSocket)
val thread = ThreadTools.createDaemon("UDPMavReceive")(worker _)
/**
* The remote app we last received packets from on our well-known port number
*/
var remote: Option[SocketAddress] = None
private var shuttingDown = false
thread.start()
protected def doSendMavlink(bytes: Array[Byte]) {
//log.debug("UDPSend: " + msg)
// Do we know a remote port?
destPortNumber.map { destPort =>
val packet = new DatagramPacket(bytes, bytes.length, serverHost, destPort)
socket.send(packet)
}.getOrElse {
// Has anyone called into us?
remote.map { r =>
//log.debug(s"Sending via UDP to $r")
val packet = new DatagramPacket(bytes, bytes.length, r)
socket.send(packet)
}.getOrElse {
log.debug("Can't send message, we haven't heard from a peer")
}
}
}
override def postStop() {
shuttingDown = true
socket.close() // Force thread exit
super.postStop()
}
private def receivePacket() = {
val bytes = new Array[Byte](512)
val packet = new DatagramPacket(bytes, bytes.length)
socket.receive(packet)
remote = Some(packet.getSocketAddress)
MavlinkUtils.bytesToPacket(packet.getData)
}
private def worker() {
try {
while (!shuttingDown) {
receivePacket.foreach(handleIncomingPacket)
}
} catch {
case ex: BindException =>
log.error("Unable to bind to port!")
self ! PoisonPill
case ex: SocketException =>
if (!shuttingDown) // If we are shutting down, ignore socket exceptions
throw ex
case ex: Exception =>
log.warning("exception in UDP receiver: " + ex)
}
log.debug("UDP receiver exiting")
}
}
object MavlinkUDP {
/// The standard port number people use
val portNumber = 14550
}
|
geeksville/arduleader
|
common/src/main/scala/com/geeksville/mavlink/MavlinkUDP.scala
|
Scala
|
gpl-3.0
| 2,984 |
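Stripped of the MAVLink framing and actor plumbing, the transport underneath MavlinkUDP is the standard DatagramSocket receive/send pair, including the "reply to whoever last talked to us" fallback. The loopback sketch below shows just that; the port number and payload are arbitrary.

// Minimal loopback sketch of the DatagramSocket receive/send pattern.
import java.net.{DatagramPacket, DatagramSocket, InetAddress}

object UdpLoopbackSketch {
  def main(args: Array[String]): Unit = {
    val receiver = new DatagramSocket(14551)
    val sender   = new DatagramSocket()

    val payload = "heartbeat".getBytes("UTF-8")
    sender.send(new DatagramPacket(payload, payload.length, InetAddress.getLoopbackAddress, 14551))

    val buffer = new Array[Byte](512)
    val packet = new DatagramPacket(buffer, buffer.length)
    receiver.receive(packet) // blocks until the datagram arrives
    println(new String(packet.getData, 0, packet.getLength, "UTF-8") + " from " + packet.getSocketAddress)

    // Reply to whoever sent the last packet, mirroring the `remote` fallback above.
    receiver.send(new DatagramPacket(payload, payload.length, packet.getSocketAddress))

    sender.close(); receiver.close()
  }
}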
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.cublaze.modules
import edu.latrobe._
import edu.latrobe.blaze._
import edu.latrobe.blaze.modules._
import edu.latrobe.cublaze._
import edu.latrobe.native._
final class AddBias_CUDA_Baseline(override val builder: AddBiasBuilder,
override val inputHints: BuildHints,
override val seed: InstanceSeed,
override val weightBufferBuilder: ValueTensorBufferBuilder)
extends AddBias_CUDA {
private var _ones
: CUDARealTensor = _
private lazy val ones
: CUDARealTensor = {
if (_ones == null) {
_ones = CUDARealTensor.fill(device, biasLayout, Real.one)
}
_ones
}
override protected def doClose()
: Unit = {
if (_ones != null) {
_ones.close()
_ones = null
}
super.doClose()
}
// ---------------------------------------------------------------------------
// Forward propagation related.
// ---------------------------------------------------------------------------
override protected def doPredictPerValue(output: CUDARealTensor)
: Unit = {
// input: RRR GGG BBB | RRR GGG BBB
// filter: RRR GGG BBB | RRR GGG BBB
output += bias
}
override protected def doPredictPerUnit(output: CUDARealTensor)
: Unit = {
// input: RRR GGG BBB | RRR GGG BBB
// filter: RRR GGG BBB
_CUDNN.addTensor(
device,
_RealTensorNativeReal.one, bias.desc, bias.data.ptr,
_RealTensorNativeReal.one, output.desc, output.data.ptr
)
}
override protected def doPredictPerChannel(output: CUDARealTensor)
: Unit = {
// input: RRR GGG BBB | RRR GGG BBB
// filter: RGB
_CUDNN.addTensor(
device,
_RealTensorNativeReal.one, bias.desc, bias.data.ptr,
_RealTensorNativeReal.one, output.desc, output.data.ptr
)
/*
scope match {
case OperationScope.Value =>
CUDNN.addTensor(
device,
//CUDNN_ADD_FULL_TENSOR,
RealPtr.one, bias.descPtr, bias.dataPtr,
RealPtr.one, output.descPtr, output.dataPtr
)
case OperationScope.Channel =>
CUDNN.addTensor(
device,
//CUDNN_ADD_SAME_C,
RealPtr.one, bias.descPtr, bias.dataPtr,
RealPtr.one, output.descPtr, output.dataPtr
)
case OperationScope.Sample =>
CUDNN.addTensor(
device,
//CUDNN_ADD_FEATURE_MAP,
RealPtr.one, bias.descPtr, bias.dataPtr,
RealPtr.one, output.descPtr, output.dataPtr
)
case OperationScope.Batch =>
// TODO: Fix this!
output += bias.get(0)
case _ =>
throw new MatchError(scope)
}
*/
}
override protected def doPredictPerSample(output: CUDARealTensor)
: Unit = {
// input: RRR GGG BBB | RRR GGG BBB
// filter: R | R
_CUDNN.addTensor(
device,
_RealTensorNativeReal.one, bias.desc, bias.data.ptr,
_RealTensorNativeReal.one, output.desc, output.data.ptr
)
}
override protected def doPredictPerBatch(output: CUDARealTensor)
: Unit = {
// input: RRR GGG BBB | RRR GGG BBB
// filter: R
_CUDNN.addTensor(
device,
_RealTensorNativeReal.one, bias.desc, bias.data.ptr,
_RealTensorNativeReal.one, output.desc, output.data.ptr
)
}
override protected def doPredictInvPerValue(input: CUDARealTensor)
: Unit = input -= bias
override protected def doPredictInvPerUnit(input: CUDARealTensor)
: Unit = {
_CUDNN.addTensor(
device,
_RealTensorNativeReal.minusOne, bias.desc, bias.data.ptr,
_RealTensorNativeReal.one, input.desc, input.data.ptr
)
}
override protected def doPredictInvPerChannel(input: CUDARealTensor)
: Unit = {
_CUDNN.addTensor(
device,
_RealTensorNativeReal.minusOne, bias.desc, bias.data.ptr,
_RealTensorNativeReal.one, input.desc, input.data.ptr
)
/*
scope match {
case OperationScope.Channel =>
CUDNN.addTensor(
device,
//CUDNN_ADD_SAME_C,
RealPtr.minusOne, bias.descPtr, bias.dataPtr,
RealPtr.one, input.descPtr, input.dataPtr
)
case OperationScope.Sample =>
CUDNN.addTensor(
device,
//CUDNN_ADD_FEATURE_MAP,
RealPtr.minusOne, bias.descPtr, bias.dataPtr,
RealPtr.one, input.descPtr, input.dataPtr
)
case OperationScope.Batch =>
// TODO: Fix this!
input += -bias.get(0)
case _ =>
throw new MatchError(scope)
}
*/
}
override protected def doPredictInvPerSample(input: CUDARealTensor)
: Unit = {
_CUDNN.addTensor(
device,
_RealTensorNativeReal.minusOne, bias.desc, bias.data.ptr,
_RealTensorNativeReal.one, input.desc, input.data.ptr
)
}
override protected def doPredictInvPerBatch(input: CUDARealTensor)
: Unit = {
_CUDNN.addTensor(
device,
_RealTensorNativeReal.minusOne, bias.desc, bias.data.ptr,
_RealTensorNativeReal.one, input.desc, input.data.ptr
)
}
// ---------------------------------------------------------------------------
// Backward propagation related.
// ---------------------------------------------------------------------------
override protected def doDeriveWeightGradientsPerValue(error: CUDARealTensor,
sink: CUDARealTensor)
: Unit = {
// error: RRR GGG BBB | RRR GGG BBB
// sink: RRR GGG BBB | RRR GGG BBB
sink += error
}
override protected def doDeriveWeightGradientsPerUnit(error: CUDARealTensor,
sink: CUDARealTensor)
: Unit = {
// error: RRR GGG BBB | RRR GGG BBB
// sink: RRR GGG BBB
// TODO: Find something nicer than gemv!
val noRows = error.layout.size.noValues
val noCols = error.layout.noSamples
_CUBLAS.gemv(
device,
_RealTensorNativeReal.one,
error.data.ptr, noRows, noRows, noCols, aTrans = false,
ones.data.ptr, 1, noRows,
_RealTensorNativeReal.one,
sink.data.ptr, 1, noRows
)
}
override protected def doDeriveWeightGradientsPerChannel(error: CUDARealTensor,
sink: CUDARealTensor)
: Unit = {
// error: RRR GGG BBB | RRR GGG BBB
// sink: RGB
_CUDNN.convolutionBackwardBias(
device,
_RealTensorNativeReal.one, error.desc, error.data.ptr,
_RealTensorNativeReal.one, sink.desc, sink.data.ptr
)
}
override protected def doDeriveWeightGradientsPerSample(error: CUDARealTensor,
sink: CUDARealTensor)
: Unit = {
// error: RRR GGG BBB | RRR GGG BBB
// sink: R | R
// TODO: Find something nicer than gemv!
val noRows = error.layout.noSamples
val noCols = error.layout.size.noValues
_CUBLAS.gemv(
device,
_RealTensorNativeReal.one,
error.data.ptr, noRows, noRows, noCols, aTrans = true,
ones.data.ptr, 1, noRows,
_RealTensorNativeReal.one,
sink.data.ptr, 1, noRows
)
}
override protected def doDeriveWeightGradientsPerBatch(error: CUDARealTensor,
sink: CUDARealTensor)
: Unit = {
// error: RRR GGG BBB | RRR GGG BBB
// sink: R
// TODO: Find something nicer than gemv!
val noRows = 1
val noCols = error.layout.noValues
_CUBLAS.gemv(
device,
_RealTensorNativeReal.one,
error.data.ptr, noRows, noRows, noCols, aTrans = false,
ones.data.ptr, 1, noRows,
_RealTensorNativeReal.one,
sink.data.ptr, 1, noRows
)
}
}
object AddBias_CUDA_Baseline_Description
extends ModuleVariant_CUDA_Description[AddBiasBuilder] {
override def build(builder: AddBiasBuilder,
hints: BuildHints,
seed: InstanceSeed,
weightsBuilder: ValueTensorBufferBuilder)
: AddBias_CUDA_Baseline = new AddBias_CUDA_Baseline(
builder, hints, seed, weightsBuilder
)
}
| bashimao/ltudl | cublaze/src/main/scala/edu/latrobe/cublaze/modules/AddBias_CUDA_Baseline.scala | Scala | apache-2.0 | 8,990 |
package org.nkvoll.javabin.routing
import akka.actor._
import akka.util.Timeout
import nl.grons.metrics.scala.Timer
import org.nkvoll.javabin.models.User
import org.nkvoll.javabin.routing.directives.{ PermissionDirectives, MetricDirectives, ClusterDirectives, AuthDirectives }
import org.nkvoll.javabin.routing.helpers.JavabinMarshallingSupport
import org.nkvoll.javabin.service.UserService.{ AuthenticateUser, GetUser }
import org.nkvoll.javabin.util.{ FutureEnrichments, SecureCookies, SprayEnrichments }
import scala.concurrent.{ Future, ExecutionContext }
import spray.http.Uri
import spray.routing._
import spray.routing.authentication.{ HttpAuthenticator, BasicAuth }
trait ApiV0Routing extends HttpService
with AuthDirectives with ClusterDirectives with PermissionDirectives with MetricDirectives
with UserRouting with MessagesRouting with AdminRouting with HealthRouting with SwaggerRouting
with JavabinMarshallingSupport with ApiV0RoutingContext {
import SprayEnrichments._
def apiTimer: Timer
// format: OFF
def apiVersion0Route(implicit t: Timeout, ec: ExecutionContext): Route = {
timedRoute(apiTimer) {
requireLoggedInOrAnonymous(apiAuth, userResolver, cookieUserKey, anonymousUser.username, secureCookies)(ec) {
user =>
pathEndOrSingleSlash {
requestUri {
uri =>
complete(Map(
"message" -> s"Hello there, ${user.username}",
"docs" -> uri.withChildPath("swagger").toString()))
}
} ~
pathPrefix("_admin") {
requirePermission("admin", user) {
adminRoute(user)
}
} ~
pathPrefix("_health") {
requirePermission("health", user) {
healthRoute
}
} ~
requireReachableQuorum() {
// we don't require permissions here, as they're handled by the user route
pathPrefix("users") {
userRoute(user)
} ~
pathPrefix("messages") {
requirePermission("messages", user) {
messagesRoute(user)
}
}
}
} ~
path("logout") {
logout(anonymousUser.username, cookieUserKey, secureCookies, Some(Uri("/")))
} ~
pathPrefix("_health" / "simple") {
simpleHealthRoute
} ~
pathPrefix("swagger") {
swaggerRoute
}
}
}
// format: ON
}
trait ApiV0RoutingContext {
def secureCookies: SecureCookies
def apiAuth(implicit t: Timeout, ec: ExecutionContext): HttpAuthenticator[User]
def userResolver(implicit t: Timeout, ec: ExecutionContext): (String => Future[User])
def cookieUserKey: String
def anonymousUser: User
}
trait ApiV0ServiceContext extends ApiV0RoutingContext {
def userService: ActorRef
import FutureEnrichments._
  // authenticates a user by looking up and verifying the supplied password
override def apiAuth(implicit t: Timeout, ec: ExecutionContext): HttpAuthenticator[User] = {
BasicAuth(upo =>
upo.fold(Future.successful(Option.empty[User]))(userPass => {
AuthenticateUser(userPass.user, userPass.pass)
.request(userService)
.recoverAsFutureOptional
}),
realm = "javabin-rest-on-akka")
}
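  // Usage sketch (illustration only, not part of the original routing): since BasicAuth
  // yields an HttpAuthenticator, it can also be plugged into spray's standard
  // `authenticate` directive, e.g.
  //   authenticate(apiAuth) { user =>
  //     complete(s"hello, ${user.username}")
  //   }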
  // resolves a username to the corresponding User object
override def userResolver(implicit t: Timeout, ec: ExecutionContext): (String => Future[User]) = { username =>
GetUser(username).request(userService)
}
}
| nkvoll/javabin-rest-on-akka | src/main/scala/org/nkvoll/javabin/routing/ApiV0Routing.scala | Scala | mit | 3,545 |
package scala.tools
package testing
import org.junit.Assert
import Assert._
import scala.runtime.ScalaRunTime.stringOf
import scala.collection.{ GenIterable, IterableLike }
import scala.collection.JavaConverters._
import scala.collection.mutable
import java.lang.ref._
import java.lang.reflect._
import java.util.IdentityHashMap
/** This module contains additional higher-level assert statements
* that are ultimately based on junit.Assert primitives.
*/
object AssertUtil {
private final val timeout = 60 * 1000L // wait a minute
private implicit class `ref helper`[A](val r: Reference[A]) extends AnyVal {
def isEmpty: Boolean = r.get == null
def nonEmpty: Boolean = !isEmpty
}
private implicit class `class helper`(val clazz: Class[_]) extends AnyVal {
def allFields: List[Field] = {
def loop(k: Class[_]): List[Field] =
if (k == null) Nil
else k.getDeclaredFields.toList ::: loop(k.getSuperclass)
loop(clazz)
}
}
private implicit class `field helper`(val f: Field) extends AnyVal {
def follow(o: AnyRef): AnyRef = {
f setAccessible true
f get o
}
}
/** Check that throwable T (or a subclass) was thrown during evaluation of `body`,
* and that its message satisfies the `checkMessage` predicate.
* Any other exception is propagated.
*/
def assertThrows[T <: Throwable](body: => Any,
checkMessage: String => Boolean = s => true)
(implicit manifest: Manifest[T]): Unit = {
try {
body
fail("Expression did not throw!")
} catch {
case e: Throwable if (manifest.runtimeClass isAssignableFrom e.getClass) &&
checkMessage(e.getMessage) =>
}
}
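  // Example usage (illustrative only): `require` throws an IllegalArgumentException whose
  // message contains the supplied clue, so both the type check and the message predicate pass.
  //   assertThrows[IllegalArgumentException](require(false, "boom"), _ contains "boom")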
/** JUnit-style assertion for `IterableLike.sameElements`.
*/
def assertSameElements[A, B >: A](expected: IterableLike[A, _], actual: GenIterable[B], message: String = ""): Unit =
if (!(expected sameElements actual))
fail(
f"${ if (message.nonEmpty) s"$message " else "" }expected:<${ stringOf(expected) }> but was:<${ stringOf(actual) }>"
)
/** Convenient for testing iterators.
*/
def assertSameElements[A, B >: A](expected: IterableLike[A, _], actual: Iterator[B]): Unit =
assertSameElements(expected, actual.toList, "")
  /** Asserts that the value produced by `a` is not strongly reachable from any of the
   * given roots after `body` is evaluated.
   */
def assertNotReachable[A <: AnyRef](a: => A, roots: AnyRef*)(body: => Unit): Unit = {
val wkref = new WeakReference(a)
def refs(root: AnyRef): mutable.Set[AnyRef] = {
val seen = new IdentityHashMap[AnyRef, Unit]
def loop(o: AnyRef): Unit =
if (wkref.nonEmpty && o != null && !seen.containsKey(o)) {
seen.put(o, ())
for {
f <- o.getClass.allFields
if !Modifier.isStatic(f.getModifiers)
if !f.getType.isPrimitive
if !classOf[Reference[_]].isAssignableFrom(f.getType)
} loop(f follow o)
}
loop(root)
seen.keySet.asScala
}
body
for (r <- roots if wkref.nonEmpty) {
assertFalse(s"Root $r held reference", refs(r) contains wkref.get)
}
}
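  // Example usage (illustrative only): clearing the only slot of a holder array must make
  // the element unreachable from that root.
  //   val holder = new Array[AnyRef](1)
  //   holder(0) = new Object
  //   assertNotReachable(holder(0), holder) { holder(0) = null }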
}
| jvican/scala | test/junit/scala/tools/testing/AssertUtil.scala | Scala | bsd-3-clause | 3,220 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.action
import io.gatling.AkkaSpec
import io.gatling.core.session.Session
import io.gatling.core.stats.StatsEngine
import io.gatling.core.stats.message.End
import io.gatling.core.stats.writer.UserMessage
class ExitSpec extends AkkaSpec {
"Exit" should "terminate the session and notify the Controller execution has ended" in {
val exit = new Exit(self, mock[StatsEngine])
var hasTerminated = false
val session = Session("scenario", 0, onExit = _ => hasTerminated = true)
exit ! session
hasTerminated shouldBe true
val userMessage = expectMsgType[UserMessage]
userMessage.session shouldBe session
userMessage.event shouldBe End
}
}
| wiacekm/gatling | gatling-core/src/test/scala/io/gatling/core/action/ExitSpec.scala | Scala | apache-2.0 | 1,315 |
package notebook.front.widgets.magic
trait ExtraMagicImplicits {
}
| dragos/spark-notebook | modules/common/src/main/pre-df/magic.scala | Scala | apache-2.0 | 67 |
package net.bhardy.braintree.scala
import net.bhardy.braintree.scala.util.NodeWrapper
import net.bhardy.braintree.scala.CreditCard._
import net.bhardy.braintree.scala.CreditCards.CardType
sealed abstract class CustomerLocation(override val toString:String)
object CustomerLocation {
case object INTERNATIONAL extends CustomerLocation("international")
case object US extends CustomerLocation("us")
case object UNRECOGNIZED extends CustomerLocation("unrecognized")
case object UNDEFINED extends CustomerLocation("undefined")
}
object CreditCard {
sealed abstract class KindIndicator(override val toString:String)
object KindIndicator {
case object YES extends KindIndicator("Yes")
case object NO extends KindIndicator("No")
case object UNKNOWN extends KindIndicator("Unknown")
def apply(s:String): KindIndicator = {
if (YES.toString.equalsIgnoreCase(s)) YES
else if (NO.toString.equalsIgnoreCase(s)) NO
else UNKNOWN
}
}
def lookupCardType(typeString: String): CreditCards.CardType = {
Option(typeString).map {
CardType.lookup(_).getOrElse(CardType.UNRECOGNIZED)
}.getOrElse {CardType.UNDEFINED}
}
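  // Examples (illustrative only, following the fallbacks above):
  //   lookupCardType(null)         // => CardType.UNDEFINED
  //   lookupCardType("no-such")    // => CardType.UNRECOGNIZED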
}
class CreditCard(node: NodeWrapper) {
val token = node.findString("token")
val createdAt = node.findDateTime("created-at")
val updatedAt = node.findDateTime("updated-at")
val bin = node.findString("bin")
val cardType: CardType = lookupCardType(node.findString("card-type"))
val cardholderName = node.findString("cardholder-name")
val customerId = node.findString("customer-id")
val customerLocation = node.findString("customer-location")
val expirationMonth = node.findString("expiration-month")
val expirationYear = node.findString("expiration-year")
val imageUrl = node.findString("image-url")
val isDefault = node.findBooleanOpt("default").getOrElse(false)
val isVenmoSdk = node.findBooleanOpt("venmo-sdk").getOrElse(false)
val isExpired = node.findBooleanOpt("expired").getOrElse(false)
val last4 = node.findString("last-4")
val commercial = KindIndicator(node.findString("commercial"))
val debit = KindIndicator(node.findString("debit"))
val durbinRegulated = KindIndicator(node.findString("durbin-regulated"))
val healthcare = KindIndicator(node.findString("healthcare"))
val payroll = KindIndicator(node.findString("payroll"))
val prepaid = KindIndicator(node.findString("prepaid"))
val countryOfIssuance = node.findStringOpt("country-of-issuance").filter{!_.isEmpty}.getOrElse("Unknown")
val issuingBank = node.findStringOpt("issuing-bank").filter{!_.isEmpty}.getOrElse("Unknown")
val uniqueNumberIdentifier = node.findString("unique-number-identifier")
val billingAddressResponse: Option[NodeWrapper] = node.findFirstOpt("billing-address")
val billingAddress = billingAddressResponse.map { new Address(_) }
val subscriptions = node.findAll("subscriptions/subscription").map{ new Subscription(_) }
def expirationDate = expirationMonth + "/" + expirationYear
def maskedNumber = bin + "******" + last4
}
| benhardy/braintree-scala | src/main/scala/CreditCard.scala | Scala | mit | 3,047 |
import sbt.Keys._
import sbt._
import pl.project13.scala.sbt.JmhPlugin
import sbtunidoc.Plugin.unidocSettings
import scoverage.ScoverageSbtPlugin
object Util extends Build {
val branch = Process("git" :: "rev-parse" :: "--abbrev-ref" :: "HEAD" :: Nil).!!.trim
val suffix = if (branch == "master") "" else "-SNAPSHOT"
val libVersion = "6.25.0" + suffix
val zkVersion = "3.4.6"
val zkDependency = "org.apache.zookeeper" % "zookeeper" % zkVersion excludeAll(
ExclusionRule("com.sun.jdmk", "jmxtools"),
ExclusionRule("com.sun.jmx", "jmxri"),
ExclusionRule("javax.jms", "jms")
)
val parserCombinators = scalaVersion(sv => sv match {
case v: String if v startsWith "2.11" =>
Seq("org.scala-lang.modules" %% "scala-parser-combinators" % "1.0.2")
case _ =>
Nil
})
lazy val publishM2Configuration =
TaskKey[PublishConfiguration]("publish-m2-configuration",
"Configuration for publishing to the .m2 repository.")
lazy val publishM2 =
TaskKey[Unit]("publish-m2",
"Publishes artifacts to the .m2 repository.")
lazy val m2Repo =
Resolver.file("publish-m2-local",
Path.userHome / ".m2" / "repository")
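  // The three definitions above wire up a `publish-m2` task: it reuses sbt's regular
  // publish machinery but targets the local ~/.m2 repository, so the artifacts can be
  // consumed directly by Maven builds on the same machine.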
val sharedSettings = Seq(
version := libVersion,
organization := "com.twitter",
scalaVersion := "2.10.5",
crossScalaVersions := Seq("2.10.5", "2.11.6"),
// Workaround for a scaladoc bug which causes it to choke on
// empty classpaths.
unmanagedClasspath in Compile += Attributed.blank(new java.io.File("doesnotexist")),
libraryDependencies ++= Seq(
"junit" % "junit" % "4.8.1" % "test",
"org.mockito" % "mockito-all" % "1.8.5" % "test",
"org.scalatest" %% "scalatest" % "2.2.4" % "test"
),
resolvers += "twitter repo" at "http://maven.twttr.com",
ScoverageSbtPlugin.ScoverageKeys.coverageHighlighting := (
CrossVersion.partialVersion(scalaVersion.value) match {
case Some((2, 10)) => false
case _ => true
}
),
publishM2Configuration <<= (packagedArtifacts, checksums in publish, ivyLoggingLevel) map { (arts, cs, level) =>
Classpaths.publishConfig(arts, None, resolverName = m2Repo.name, checksums = cs, logging = level)
},
publishM2 <<= Classpaths.publishTask(publishM2Configuration, deliverLocal),
otherResolvers += m2Repo,
scalacOptions ++= Seq("-encoding", "utf8"),
scalacOptions += "-deprecation",
javacOptions ++= Seq("-source", "1.7", "-target", "1.7"),
javacOptions in doc := Seq("-source", "1.7"),
// This is bad news for things like com.twitter.util.Time
parallelExecution in Test := false,
// Sonatype publishing
publishArtifact in Test := false,
pomIncludeRepository := { _ => false },
publishMavenStyle := true,
autoAPIMappings := true,
apiURL := Some(url("https://twitter.github.io/util/docs/")),
pomExtra := (
<url>https://github.com/twitter/util</url>
<licenses>
<license>
<name>Apache License, Version 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0</url>
</license>
</licenses>
<scm>
<url>[email protected]:twitter/util.git</url>
<connection>scm:git:[email protected]:twitter/util.git</connection>
</scm>
<developers>
<developer>
<id>twitter</id>
<name>Twitter Inc.</name>
<url>https://www.twitter.com/</url>
</developer>
</developers>),
publishTo <<= version { (v: String) =>
val nexus = "https://oss.sonatype.org/"
if (v.trim.endsWith("SNAPSHOT"))
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "service/local/staging/deploy/maven2")
}
)
lazy val util = Project(
id = "util",
base = file("."),
settings = Project.defaultSettings ++
sharedSettings ++
unidocSettings
) aggregate(
utilFunction, utilRegistry, utilCore, utilCodec, utilCollection, utilCache, utilReflect,
utilLogging, utilTest, utilThrift, utilHashing, utilJvm, utilZk,
utilZkCommon, utilZkTest, utilClassPreloader, utilBenchmark, utilApp,
utilEvents, utilStats, utilEval
)
lazy val utilApp = Project(
id = "util-app",
base = file("util-app"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-app"
).dependsOn(utilCore, utilRegistry)
lazy val utilBenchmark = Project(
id = "util-benchmark",
base = file("util-benchmark"),
settings = Project.defaultSettings ++
sharedSettings ++ JmhPlugin.projectSettings
)
.enablePlugins(JmhPlugin)
.settings(
name := "util-benchmark"
).dependsOn(utilCore, utilJvm, utilEvents)
lazy val utilCache = Project(
id = "util-cache",
base = file("util-cache"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-cache",
libraryDependencies ++= Seq(
// NB: guava has a `provided` dep on jsr/javax packages, so we include them manually
"com.google.code.findbugs" % "jsr305" % "1.3.9",
"com.google.guava" % "guava" % "16.0.1"
)
).dependsOn(utilCore)
lazy val utilClassPreloader = Project(
id = "util-class-preloader",
base = file("util-class-preloader"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-class-preloader"
).dependsOn(utilCore)
lazy val utilCodec = Project(
id = "util-codec",
base = file("util-codec"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-codec",
libraryDependencies ++= Seq(
"commons-codec" % "commons-codec" % "1.6"
)
).dependsOn(utilCore)
lazy val utilCollection = Project(
id = "util-collection",
base = file("util-collection"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-collection",
libraryDependencies ++= Seq(
// NB: guava has a `provided` dep on jsr/javax packages, so we include them manually
"com.google.code.findbugs" % "jsr305" % "1.3.9",
"javax.inject" % "javax.inject" % "1",
"com.google.guava" % "guava" % "16.0.1",
"commons-collections" % "commons-collections" % "3.2.1",
"org.scalacheck" %% "scalacheck" % "1.12.2" % "test"
)
).dependsOn(utilCore % "compile->compile;test->test")
lazy val utilCore = Project(
id = "util-core",
base = file("util-core"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-core",
libraryDependencies ++= Seq(
"com.twitter.common" % "objectsize" % "0.0.10" % "test",
"org.scalacheck" %% "scalacheck" % "1.12.2" % "test"
),
libraryDependencies <++= parserCombinators,
resourceGenerators in Compile <+=
(resourceManaged in Compile, name, version) map { (dir, name, ver) =>
val file = dir / "com" / "twitter" / name / "build.properties"
val buildRev = Process("git" :: "rev-parse" :: "HEAD" :: Nil).!!.trim
val buildName = new java.text.SimpleDateFormat("yyyyMMdd-HHmmss").format(new java.util.Date)
val contents = (
"name=%s\\nversion=%s\\nbuild_revision=%s\\nbuild_name=%s"
).format(name, ver, buildRev, buildName)
IO.write(file, contents)
Seq(file)
}
).dependsOn(utilFunction)
lazy val utilEval = Project(
id = "util-eval",
base = file("util-eval"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-eval",
libraryDependencies <+= scalaVersion {
"org.scala-lang" % "scala-compiler" % _ % "compile"
}
).dependsOn(utilCore)
lazy val utilEvents = Project(
id = "util-events",
base = file("util-events"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-events"
).dependsOn(utilApp)
lazy val utilFunction = Project(
id = "util-function",
base = file("util-function"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-function"
)
lazy val utilReflect = Project(
id = "util-reflect",
base = file("util-reflect"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-reflect",
libraryDependencies ++= Seq(
"asm" % "asm" % "3.3.1",
"asm" % "asm-util" % "3.3.1",
"asm" % "asm-commons" % "3.3.1",
"cglib" % "cglib" % "2.2"
)
).dependsOn(utilCore)
lazy val utilHashing = Project(
id = "util-hashing",
base = file("util-hashing"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-hashing",
libraryDependencies ++= Seq(
"commons-codec" % "commons-codec" % "1.6" % "test",
"org.scalacheck" %% "scalacheck" % "1.12.2" % "test"
)
).dependsOn(utilCore % "test")
lazy val utilJvm = Project(
id = "util-jvm",
base = file("util-jvm"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-jvm"
).dependsOn(utilApp, utilCore, utilTest % "test")
lazy val utilLogging = Project(
id = "util-logging",
base = file("util-logging"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-logging"
).dependsOn(utilCore, utilApp, utilStats)
lazy val utilRegistry = Project(
id = "util-registry",
base = file("util-registry"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-registry"
).dependsOn(utilCore)
lazy val utilStats = Project(
id = "util-stats",
base = file("util-stats"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-stats"
).dependsOn(utilCore)
lazy val utilTest = Project(
id = "util-test",
base = file("util-test"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-test",
libraryDependencies ++= Seq(
"org.scalatest" %% "scalatest" % "2.2.4",
"org.mockito" % "mockito-all" % "1.8.5"
)
).dependsOn(utilCore, utilLogging)
lazy val utilThrift = Project(
id = "util-thrift",
base = file("util-thrift"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-thrift",
libraryDependencies ++= Seq(
"thrift" % "libthrift" % "0.5.0",
"org.slf4j" % "slf4j-nop" % "1.5.8" % "provided",
"com.fasterxml.jackson.core" % "jackson-core" % "2.3.1",
"com.fasterxml.jackson.core" % "jackson-databind" % "2.3.1"
)
).dependsOn(utilCodec)
lazy val utilZk = Project(
id = "util-zk",
base = file("util-zk"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-zk",
libraryDependencies ++= Seq(
zkDependency
)
).dependsOn(utilCore, utilCollection, utilLogging)
lazy val utilZkCommon = Project(
id = "util-zk-common",
base = file("util-zk-common"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-zk-common",
libraryDependencies ++= Seq(
"com.twitter.common.zookeeper" % "client" % "0.0.60",
"com.twitter.common.zookeeper" % "group" % "0.0.78",
"com.twitter.common.zookeeper" % "server-set" % "1.0.83",
zkDependency
)
).dependsOn(utilCore, utilLogging, utilZk,
    // These are depended on to provide transitive dependencies
// that would otherwise cause incompatibilities. See above comment.
utilCollection, utilHashing
)
lazy val utilZkTest = Project(
id = "util-zk-test",
base = file("util-zk-test"),
settings = Project.defaultSettings ++
sharedSettings
).settings(
name := "util-zk-test",
libraryDependencies ++= Seq(
"com.twitter.common" % "io" % "0.0.58" % "test",
zkDependency
)
)
}
| travisbrown/util | project/Build.scala | Scala | apache-2.0 | 12,128 |
package dotty.tools.dotc.config
import PathResolver.Defaults
class ScalaSettings extends Settings.SettingGroup {
protected def defaultClasspath = sys.env.getOrElse("CLASSPATH", ".")
/** Path related settings.
*/
val bootclasspath = PathSetting("-bootclasspath", "Override location of bootstrap class files.", Defaults.scalaBootClassPath)
val extdirs = PathSetting("-extdirs", "Override location of installed extensions.", Defaults.scalaExtDirs)
val javabootclasspath = PathSetting("-javabootclasspath", "Override java boot classpath.", Defaults.javaBootClassPath)
val javaextdirs = PathSetting("-javaextdirs", "Override java extdirs classpath.", Defaults.javaExtDirs)
val sourcepath = PathSetting("-sourcepath", "Specify location(s) of source files.", "") // Defaults.scalaSourcePath
/** Other settings.
*/
val dependencyfile = StringSetting("-dependencyfile", "file", "Set dependency tracking file.", ".scala_dependencies")
val deprecation = BooleanSetting("-deprecation", "Emit warning and location for usages of deprecated APIs.")
val migration = BooleanSetting("-migration", "Emit warning and location for migration issues from Scala 2.")
val encoding = StringSetting("-encoding", "encoding", "Specify character encoding used by source files.", Properties.sourceEncoding)
val explaintypes = BooleanSetting("-explaintypes", "Explain type errors in more detail.")
val feature = BooleanSetting("-feature", "Emit warning and location for usages of features that should be imported explicitly.")
val g = ChoiceSetting("-g", "level", "Set level of generated debugging info.", List("none", "source", "line", "vars", "notailcalls"), "vars")
val help = BooleanSetting("-help", "Print a synopsis of standard options")
val nowarn = BooleanSetting("-nowarn", "Generate no warnings.")
val print = BooleanSetting("-print", "Print program with Scala-specific features removed.")
val target = ChoiceSetting("-target", "target", "Target platform for object files. All JVM 1.5 targets are deprecated.",
List("jvm-1.5", "jvm-1.5-fjbg", "jvm-1.5-asm", "jvm-1.6", "jvm-1.7", "jvm-1.8", "msil"),
"jvm-1.8")
val unchecked = BooleanSetting("-unchecked", "Enable additional warnings where generated code depends on assumptions.")
val uniqid = BooleanSetting("-uniqid", "Uniquely tag all identifiers in debugging output.")
val usejavacp = BooleanSetting("-usejavacp", "Utilize the java.class.path in classpath resolution.")
val verbose = BooleanSetting("-verbose", "Output messages about what the compiler is doing.")
val version = BooleanSetting("-version", "Print product version and exit.")
val pageWidth = IntSetting("-pagewidth", "Set page width", 80)
val jvmargs = PrefixSetting("-J<flag>", "-J", "Pass <flag> directly to the runtime system.")
val defines = PrefixSetting("-Dproperty=value", "-D", "Pass -Dproperty=value directly to the runtime system.")
val toolcp = PathSetting("-toolcp", "Add to the runner classpath.", "")
val nobootcp = BooleanSetting("-nobootcp", "Do not use the boot classpath for the scala jars.")
val strict = BooleanSetting("-strict", "Use strict type rules, which means some formerly legal code does not typecheck anymore.")
val argfiles = BooleanSetting("@<file>", "A text file containing compiler arguments (options and source files)")
val classpath = PathSetting("-classpath", "Specify where to find user class files.", defaultClasspath) withAbbreviation "-cp"
val d = StringSetting("-d", "directory|jar", "destination for generated classfiles.", ".")
val nospecialization = BooleanSetting("-no-specialization", "Ignore @specialize annotations.")
val language = MultiStringSetting("-language", "feature", "Enable one or more language features.")
/** -X "Advanced" settings
*/
val Xhelp = BooleanSetting("-X", "Print a synopsis of advanced options.")
val assemname = StringSetting("-Xassem-name", "file", "(Requires -target:msil) Name of the output assembly.", "").dependsOn(target, "msil")
val assemrefs = StringSetting("-Xassem-path", "path", "(Requires -target:msil) List of assemblies referenced by the program.", ".").dependsOn(target, "msil")
val assemextdirs = StringSetting("-Xassem-extdirs", "dirs", "(Requires -target:msil) List of directories containing assemblies. default:lib", Defaults.scalaLibDir.path).dependsOn(target, "msil")
val sourcedir = StringSetting("-Xsourcedir", "directory", "(Requires -target:msil) Mirror source folder structure in output directory.", ".").dependsOn(target, "msil")
val checkInit = BooleanSetting("-Xcheckinit", "Wrap field accessors to throw an exception on uninitialized access.")
val noassertions = BooleanSetting("-Xdisable-assertions", "Generate no assertions or assumptions.")
// val elidebelow = IntSetting("-Xelide-below", "Calls to @elidable methods are omitted if method priority is lower than argument",
// elidable.MINIMUM, None, elidable.byName get _)
val noForwarders = BooleanSetting("-Xno-forwarders", "Do not generate static forwarders in mirror classes.")
val genPhaseGraph = StringSetting("-Xgenerate-phase-graph", "file", "Generate the phase graphs (outputs .dot files) to fileX.dot.", "")
val XlogImplicits = BooleanSetting("-Xlog-implicits", "Show more detail on why some implicits are not applicable.")
val XminImplicitSearchDepth = IntSetting("-Xmin-implicit-search-depth", "Set number of levels of implicit searches undertaken before checking for divergence.", 5)
val logImplicitConv = BooleanSetting("-Xlog-implicit-conversions", "Print a message whenever an implicit conversion is inserted.")
val logReflectiveCalls = BooleanSetting("-Xlog-reflective-calls", "Print a message when a reflective method call is generated")
val logFreeTerms = BooleanSetting("-Xlog-free-terms", "Print a message when reification creates a free term.")
val logFreeTypes = BooleanSetting("-Xlog-free-types", "Print a message when reification resorts to generating a free type.")
val maxClassfileName = IntSetting("-Xmax-classfile-name", "Maximum filename length for generated classes", 255, 72 to 255)
val Xmigration = VersionSetting("-Xmigration", "Warn about constructs whose behavior may have changed since version.")
val Xsource = VersionSetting("-Xsource", "Treat compiler input as Scala source for the specified version.")
val Xnojline = BooleanSetting("-Xnojline", "Do not use JLine for editing.")
val Xverify = BooleanSetting("-Xverify", "Verify generic signatures in generated bytecode (asm backend only.)")
val plugin = MultiStringSetting("-Xplugin", "file", "Load one or more plugins from files.")
val disable = MultiStringSetting("-Xplugin-disable", "plugin", "Disable the given plugin(s).")
val showPlugins = BooleanSetting("-Xplugin-list", "Print a synopsis of loaded plugins.")
val require = MultiStringSetting("-Xplugin-require", "plugin", "Abort unless the given plugin(s) are available.")
val pluginsDir = StringSetting("-Xpluginsdir", "path", "Path to search compiler plugins.", Defaults.scalaPluginPath)
val Xprint = PhasesSetting("-Xprint", "Print out program after")
val writeICode = PhasesSetting("-Xprint-icode", "Log internal icode to *.icode files after", "icode")
val Xprintpos = BooleanSetting("-Xprint-pos", "Print tree positions, as offsets.")
val printtypes = BooleanSetting("-Xprint-types", "Print tree types (debugging option).")
val prompt = BooleanSetting("-Xprompt", "Display a prompt after each error (debugging option).")
val script = StringSetting("-Xscript", "object", "Treat the source file as a script and wrap it in a main method.", "")
val mainClass = StringSetting("-Xmain-class", "path", "Class for manifest's Main-Class entry (only useful with -d <jar>)", "")
val Xshowcls = StringSetting("-Xshow-class", "class", "Show internal representation of class.", "")
val Xshowobj = StringSetting("-Xshow-object", "object", "Show internal representation of object.", "")
val showPhases = BooleanSetting("-Xshow-phases", "Print a synopsis of compiler phases.")
val sourceReader = StringSetting("-Xsource-reader", "classname", "Specify a custom method for reading source files.", "")
val XnoValueClasses = BooleanSetting("-Xno-value-classes", "Do not use value classes. Helps debugging.")
  val XreplLineWidth = IntSetting("-Xrepl-line-width", "Maximum number of columns per line for REPL output", 390)
val XoldPatmat = BooleanSetting("-Xoldpatmat", "Use the pre-2.10 pattern matcher. Otherwise, the 'virtualizing' pattern matcher is used in 2.10.")
val XnoPatmatAnalysis = BooleanSetting("-Xno-patmat-analysis", "Don't perform exhaustivity/unreachability analysis. Also, ignore @switch annotation.")
val XfullLubs = BooleanSetting("-Xfull-lubs", "Retains pre 2.10 behavior of less aggressive truncation of least upper bounds.")
/** -Y "Private" settings
*/
val overrideObjects = BooleanSetting("-Yoverride-objects", "Allow member objects to be overridden.")
val overrideVars = BooleanSetting("-Yoverride-vars", "Allow vars to be overridden.")
val Yhelp = BooleanSetting("-Y", "Print a synopsis of private options.")
val browse = PhasesSetting("-Ybrowse", "Browse the abstract syntax tree after")
val Ycheck = PhasesSetting("-Ycheck", "Check the tree at the end of")
val YcheckMods = BooleanSetting("-Ycheck-mods", "Check that symbols and their defining trees have modifiers in sync")
  val YcheckTypedTrees = BooleanSetting("-YcheckTypedTrees", "Check all constructed typed trees for type correctness")
val Yshow = PhasesSetting("-Yshow", "(Requires -Xshow-class or -Xshow-object) Show after")
val Xcloselim = BooleanSetting("-Yclosure-elim", "Perform closure elimination.")
val Ycompacttrees = BooleanSetting("-Ycompact-trees", "Use compact tree printer when displaying trees.")
val noCompletion = BooleanSetting("-Yno-completion", "Disable tab-completion in the REPL.")
val Xdce = BooleanSetting("-Ydead-code", "Perform dead code elimination.")
val debug = BooleanSetting("-Ydebug", "Increase the quantity of debugging output.")
val debugNames = BooleanSetting("-YdebugNames", "Show name-space indicators when printing names")
val debugTrace = BooleanSetting("-Ydebug-trace", "Trace core operations")
val debugFlags = BooleanSetting("-Ydebug-flags", "Print all flags of definitions")
val debugOwners = BooleanSetting("-Ydebug-owners", "Print all owners of definitions (requires -Yprint-syms)")
//val doc = BooleanSetting ("-Ydoc", "Generate documentation")
val termConflict = ChoiceSetting("-Yresolve-term-conflict", "strategy", "Resolve term conflicts", List("package", "object", "error"), "error")
val inline = BooleanSetting("-Yinline", "Perform inlining when possible.")
val inlineHandlers = BooleanSetting("-Yinline-handlers", "Perform exception handler inlining when possible.")
  val YinlinerWarnings = BooleanSetting("-Yinline-warnings", "Emit inlining warnings. (Normally suppressed due to high volume)")
val Xlinearizer = ChoiceSetting("-Ylinearizer", "which", "Linearizer to use", List("normal", "dfs", "rpo", "dump"), "rpo")
val log = PhasesSetting("-Ylog", "Log operations during")
val Ylogcp = BooleanSetting("-Ylog-classpath", "Output information about what classpath is being applied.")
val Ynogenericsig = BooleanSetting("-Yno-generic-signatures", "Suppress generation of generic signatures for Java.")
val YnoImports = BooleanSetting("-Yno-imports", "Compile without importing scala.*, java.lang.*, or Predef.")
val nopredef = BooleanSetting("-Yno-predef", "Compile without importing Predef.")
val noAdaptedArgs = BooleanSetting("-Yno-adapted-args", "Do not adapt an argument list (either by inserting () or creating a tuple) to match the receiver.")
  val selfInAnnots = BooleanSetting("-Yself-in-annots", "Include a \"self\" identifier inside of annotations.")
val Xshowtrees = BooleanSetting("-Yshow-trees", "(Requires -Xprint:) Print detailed ASTs in formatted form.")
val XshowtreesCompact = BooleanSetting("-Yshow-trees-compact", "(Requires -Xprint:) Print detailed ASTs in compact form.")
val XshowtreesStringified = BooleanSetting("-Yshow-trees-stringified", "(Requires -Xprint:) Print stringifications along with detailed ASTs.")
val Yshowsyms = BooleanSetting("-Yshow-syms", "Print the AST symbol hierarchy after each phase.")
val Yshowsymkinds = BooleanSetting("-Yshow-symkinds", "Print abbreviated symbol kinds next to symbol names.")
val Yskip = PhasesSetting("-Yskip", "Skip")
val Ygenjavap = StringSetting("-Ygen-javap", "dir", "Generate a parallel output directory of .javap files.", "")
val Ydumpclasses = StringSetting("-Ydump-classes", "dir", "Dump the generated bytecode to .class files (useful for reflective compilation that utilizes in-memory classloaders).", "")
val Ynosqueeze = BooleanSetting("-Yno-squeeze", "Disable creation of compact code in matching.")
val YstopAfter = PhasesSetting("-Ystop-after", "Stop after") withAbbreviation ("-stop") // backward compat
val YstopBefore = PhasesSetting("-Ystop-before", "Stop before") // stop before erasure as long as we have not debugged it fully
val refinementMethodDispatch = ChoiceSetting("-Ystruct-dispatch", "policy", "structural method dispatch policy", List("no-cache", "mono-cache", "poly-cache", "invoke-dynamic"), "poly-cache")
val Yrangepos = BooleanSetting("-Yrangepos", "Use range positions for syntax trees.")
val Ybuilderdebug = ChoiceSetting("-Ybuilder-debug", "manager", "Compile using the specified build manager.", List("none", "refined", "simple"), "none")
val Yreifycopypaste = BooleanSetting("-Yreify-copypaste", "Dump the reified trees in copypasteable representation.")
val Yreplsync = BooleanSetting("-Yrepl-sync", "Do not use asynchronous code for repl startup")
  val YmethodInfer = BooleanSetting("-Yinfer-argument-types", "Infer types for arguments of overridden methods.")
val etaExpandKeepsStar = BooleanSetting("-Yeta-expand-keeps-star", "Eta-expand varargs methods to T* rather than Seq[T]. This is a temporary option to ease transition.")
val Yinvalidate = StringSetting("-Yinvalidate", "classpath-entry", "Invalidate classpath entry before run", "")
val noSelfCheck = BooleanSetting("-Yno-self-type-checks", "Suppress check for self-type conformance among inherited members.")
val YtraceContextCreation = BooleanSetting("-Ytrace-context-creation", "Store stack trace of context creations.")
  val YshowSuppressedErrors = BooleanSetting("-Yshow-suppressed-errors", "Also show follow-on errors and warnings that are normally suppressed.")
val Yheartbeat = BooleanSetting("-Yheartbeat", "show heartbeat stack trace of compiler operations.")
val Yprintpos = BooleanSetting("-Yprintpos", "show tree positions.")
val YnoDeepSubtypes = BooleanSetting("-Yno-deep-subtypes", "throw an exception on deep subtyping call stacks.")
val YplainPrinter = BooleanSetting("-Yplain-printer", "Pretty-print using a plain printer.")
val YprintSyms = BooleanSetting("-Yprint-syms", "when printing trees print info in symbols instead of corresponding info in trees.")
val YtestPickler = BooleanSetting("-Ytest-pickler", "self-test for pickling functionality; should be used with -Ystop-after:pickler")
val YcheckReentrant = BooleanSetting("-Ycheck-reentrant", "check that compiled program does not contain vars that can be accessed from a global root.")
def stop = YstopAfter
/** Area-specific debug output.
*/
val Ybuildmanagerdebug = BooleanSetting("-Ybuild-manager-debug", "Generate debug information for the Refined Build Manager compiler.")
val Ycompletion = BooleanSetting("-Ycompletion-debug", "Trace all tab completion activity.")
val Ydocdebug = BooleanSetting("-Ydoc-debug", "Trace all scaladoc activity.")
val Yidedebug = BooleanSetting("-Yide-debug", "Generate, validate and output trees using the interactive compiler.")
val Yinferdebug = BooleanSetting("-Yinfer-debug", "Trace type inference and implicit search.")
val Yissuedebug = BooleanSetting("-Yissue-debug", "Print stack traces when a context issues an error.")
val YmacrodebugLite = BooleanSetting("-Ymacro-debug-lite", "Trace essential macro-related activities.")
val YmacrodebugVerbose = BooleanSetting("-Ymacro-debug-verbose", "Trace all macro-related activities: compilation, generation of synthetics, classloading, expansion, exceptions.")
val Ypmatdebug = BooleanSetting("-Ypmat-debug", "Trace all pattern matcher activity.")
val Yposdebug = BooleanSetting("-Ypos-debug", "Trace position validation.")
val Yreifydebug = BooleanSetting("-Yreify-debug", "Trace reification.")
val Yrepldebug = BooleanSetting("-Yrepl-debug", "Trace all repl activity.")
val Ytyperdebug = BooleanSetting("-Ytyper-debug", "Trace all type assignments.")
val Ypatmatdebug = BooleanSetting("-Ypatmat-debug", "Trace pattern matching translation.")
val Yexplainlowlevel = BooleanSetting("-Yexplain-lowlevel", "When explaining type errors, show types at a lower level.")
val YnoDoubleBindings = BooleanSetting("-Yno-double-bindings", "Assert no namedtype is bound twice (should be enabled only if program is error-free).")
val YshowVarBounds = BooleanSetting("-Yshow-var-bounds", "Print type variables with their bounds")
val optimise = BooleanSetting("-optimise", "Generates faster bytecode by applying optimisations to the program") withAbbreviation "-optimize"
/** IDE-specific settings
*/
val YpresentationVerbose = BooleanSetting("-Ypresentation-verbose", "Print information about presentation compiler tasks.")
val YpresentationDebug = BooleanSetting("-Ypresentation-debug", "Enable debugging output for the presentation compiler.")
val YpresentationStrict = BooleanSetting("-Ypresentation-strict", "Do not report type errors in sources with syntax errors.")
val YpresentationLog = StringSetting("-Ypresentation-log", "file", "Log presentation compiler events into file", "")
val YpresentationReplay = StringSetting("-Ypresentation-replay", "file", "Replay presentation compiler events from file", "")
val YpresentationDelay = IntSetting("-Ypresentation-delay", "Wait number of ms after typing before starting typechecking", 0, 0 to 999)
}
| densh/dotty | src/dotty/tools/dotc/config/ScalaSettings.scala | Scala | bsd-3-clause | 18,178 |
package controllers
import java.util.{Date, UUID}
import javax.inject.{Inject, Singleton}
import akka.stream.Materializer
import akka.stream.scaladsl.{FileIO, Sink}
import akka.util.ByteString
import models._
import net.fortuna.ical4j.model.property.Uid
import net.fortuna.ical4j.model.{Calendar, Dur}
import org.joda.time.DateTime
import play.api.Logger
import play.api.http.HttpEntity
import play.api.i18n.{Lang, Langs, MessagesApi}
import play.api.libs.Files.DefaultTemporaryFileCreator
import play.api.libs.json.Json
import play.api.libs.ws._
import play.api.mvc._
import play.modules.reactivemongo.{ReactiveMongoApi, ReactiveMongoComponents}
import reactivemongo.bson.Macros.handler
import reactivemongo.bson.{BSONDocumentHandler, document}
import security.Secured
import services.{ImportService, MongoService, RegionService, UserService}
import scala.concurrent.{ExecutionContext, Future}
trait JsonFormats {
  // Generates Writes and Reads for Ranking, Match and Region thanks to Json Macros
implicit val rankingFormat = Json.format[Ranking]
implicit val matchFormat = Json.format[Match]
implicit val regionFormat = Json.format[Region]
}
trait BsonFormats {
implicit val matchHandler: BSONDocumentHandler[Match] = handler[Match]
implicit val rankingHandler: BSONDocumentHandler[Ranking] = handler[Ranking]
implicit val inputFileFormat: BSONDocumentHandler[InputFile] = handler[InputFile]
implicit val userFormat: BSONDocumentHandler[User] = handler[User]
}
@Singleton
class ApiController @Inject()(langs: Langs, messagesApi: MessagesApi,
val reactiveMongoApi: ReactiveMongoApi,
val cc: ControllerComponents,
regionService: RegionService,
val userService: UserService,
ws: WSClient,
temporaryFileCreator: DefaultTemporaryFileCreator,
importService: ImportService,
implicit val mat: Materializer,
implicit val ec: ExecutionContext
) extends AbstractController(cc) with ReactiveMongoComponents with JsonFormats with BsonFormats with Secured {
private val rankingsDao: MongoService[Ranking] = new MongoService[Ranking](reactiveMongoApi.database, "rankings")
private val matchesDao: MongoService[Match] = new MongoService[Match](reactiveMongoApi.database, "matches")
def seasons() = Action {
Ok(Json.toJson(List(Map("name" -> "1718"))))
}
def availableRankingsForRegion(season: String, region: String): Action[AnyContent] = Action.async { implicit request: Request[AnyContent] =>
rankingsDao.distinct(
"division",
Option(document("season" -> season, "region" -> region))
).map { s: List[String] =>
Ok(Json.toJson(s))
}
}
def rankings(season: String, region: String, division: String): Action[AnyContent] = Action.async { implicit request: Request[AnyContent] =>
rankingsDao.find(
document("season" -> season, "region" -> region, "division" -> division)
).map { rankings: Seq[Ranking] =>
Ok(Json.toJson(rankings))
}
}
def availablePeriodsForDivision(season: String, region: String, division: String): Action[AnyContent] = Action.async {
rankingsDao.distinct(
"period",
Option(document("season" -> season, "region" -> region, "division" -> division))
).map { s: List[String] => Ok(Json.toJson(s)) }
}
def rankingForDivisionAndPeriod(season: String, region: String, division: String, period: String): Action[AnyContent] = Action.async { implicit request: Request[AnyContent] =>
rankingsDao.find(
document("season" -> season, "region" -> region, "division" -> division, "period" -> period)
).map { rankings: Seq[Ranking] =>
Ok(Json.toJson(rankings))
}
}
def matches(season: String, region: String, division: String): Action[AnyContent] = Action.async { implicit request: Request[AnyContent] =>
matchesDao.find(
document("season" -> season, "region" -> region, "division" -> division)
).map { matches: Seq[Match] =>
Ok(Json.toJson(matches))
}
}
def matchesForMatchDay(season: String, region: String, division: String, matchDay: Long): Action[AnyContent] = Action.async { implicit request: Request[AnyContent] =>
matchesDao.find(
document("season" -> season, "region" -> region, "division" -> division, "matchDay" -> matchDay)
).map { rankings: Seq[Match] =>
Ok(Json.toJson(rankings))
}
}
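  // Returns every match of the team's next match day: the team's earliest future fixture
  // in the division determines the match day, and all matches of that match day are returned.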
def matchesForTeamMatchDay(season: String, region: String, division: String, regNumber: String): Action[AnyContent] = Action.async { implicit request: Request[AnyContent] =>
matchesDao.find(
document(
"season" -> season,
"region" -> region,
"division" -> division,
)
).map { matches: Seq[Match] =>
val matchesForNextMatchday = matches
.filter { m => m.regNumberHome == regNumber || m.regNumberAway == regNumber }
.filter { m => m.dateTime after new Date() }
.sortBy(_.dateTime.getTime).headOption.map(_.matchDay)
.map { matchDay =>
matches.filter(_.matchDay == matchDay)
}
Ok(Json.toJson(matchesForNextMatchday))
}
}
def regions(season: String) = Action {
Ok(Json.toJson(regionService.regions))
}
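  // For each division the team appears in, returns its most recent match of the past month
  // (home or away), if any.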
def previousMatches(season: String, regNumber: String): Action[AnyContent] = Action.async { implicit request: Request[AnyContent] =>
matchesDao.find(
document("season" -> season, "$or" -> reactivemongo.bson.array(
document("regNumberHome" -> regNumber),
document("regNumberAway" -> regNumber)
))
).map { matches: Seq[Match] =>
val previousMatches = matches.groupBy(_.division).flatMap { case (division, matches) =>
matches.filter { m =>
val now = new Date()
(m.dateTime before now) && (m.dateTime after new DateTime(now).minusMonths(1).toDate)
}.sortBy(-_.dateTime.getTime).headOption
}
Ok(Json.toJson(previousMatches))
}
}
def upcomingMatches(season: String, regNumber: String) = Action.async { implicit request: Request[AnyContent] =>
matchesDao.find(
document("season" -> season, "$or" -> reactivemongo.bson.array(
document("regNumberHome" -> regNumber),
document("regNumberAway" -> regNumber)
))
).map { matches: Seq[Match] =>
val upcomingMatches = matches.groupBy(_.division).flatMap { case (division, matches) =>
matches.filter { m =>
m.dateTime after new Date()
}.sortBy(_.dateTime.getTime).headOption
}
Ok(Json.toJson(upcomingMatches))
}
}
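  // Serves a club logo: a cached copy under logo/<regNumber>.jpeg is streamed from disk if
  // present; otherwise the image is downloaded from the federation's site, written to the
  // cache directory and then streamed back to the client.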
def logo(regNumber: String): Action[AnyContent] = Action.async { implicit request: Request[AnyContent] =>
val tempFile = new java.io.File(s"logo/$regNumber.jpeg")
if (tempFile.exists()) {
val source = FileIO.fromPath(tempFile.toPath)
Future(Result(
header = ResponseHeader(200, Map.empty),
body = HttpEntity.Streamed(source, None, Some("image/jpeg"))
))
} else {
tempFile.getParentFile.mkdirs()
val url = s"http://static.belgianfootball.be/project/publiek/clublogo/$regNumber.jpg"
ws.url(url).get().flatMap { response: WSResponse =>
val outputStream = java.nio.file.Files.newOutputStream(tempFile.toPath)
// The sink that writes to the output stream
val sink = Sink.foreach[ByteString] { bytes =>
outputStream.write(bytes.toArray)
}
response.bodyAsSource.runWith(sink)
}.map { done =>
val source = FileIO.fromPath(tempFile.toPath)
Result(
header = ResponseHeader(200, Map.empty),
body = HttpEntity.Streamed(source, None, Some("image/jpeg"))
)
}
}
}
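  // Builds an iCalendar feed of a club's matches for the requested side (home, away or
  // both). Played matches carry the score in the event summary, while matches with a
  // non-empty status show a localised status marker instead.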
def matchesCalendar(season: String, regNumber: String, side: String) = Action.async { implicit request: Request[AnyContent] =>
val lang: Lang = langs.availables.head
val orQuery = side match {
case "home" => reactivemongo.bson.array(document("regNumberHome" -> regNumber))
case "away" => reactivemongo.bson.array(document("regNumberAway" -> regNumber))
case _ => reactivemongo.bson.array(document("regNumberHome" -> regNumber), document("regNumberAway" -> regNumber)
)
}
matchesDao.find(
document("season" -> season, "$or" -> orQuery)
).map { matches: Seq[Match] =>
import net.fortuna.ical4j.model.component.VEvent
import net.fortuna.ical4j.model.property.{CalScale, ProdId, Version}
val calendar = new Calendar()
calendar.getProperties.add(new ProdId(s"-//Footbalisto//Upcoming matches for $regNumber//EN"))
calendar.getProperties.add(Version.VERSION_2_0)
calendar.getProperties.add(CalScale.GREGORIAN)
matches.foreach { m: Match =>
val summary = if (!m.status.isEmpty) {
s"[${m.division}] ${m.home} vs ${m.away} --- ${messagesApi(s"match.status.${m.status}")(lang)} ---"
} else {
(for {
resultHome <- m.resultHome
resultAway <- m.resultAway
} yield {
s"[${m.division}] ${m.home} [ $resultHome-$resultAway ] ${m.away}"
}).getOrElse(s"[${m.division}] ${m.home} vs ${m.away}")
}
val matchEvent = new VEvent(new net.fortuna.ical4j.model.DateTime(m.dateTime), new Dur("PT105M"), summary)
matchEvent.getProperties.add(new Uid(UUID.randomUUID().toString))
calendar.getComponents.add(matchEvent)
}
Ok(calendar.toString).as("text/calendar")
}
}
def authenticated(): EssentialAction = authenticatedRequest { request =>
Logger.info("in the authenticated request")
Ok("Authenticated")
}
}
| soniCaH/footbalisto-api | app/controllers/ApiController.scala | Scala | apache-2.0 | 9,810 |
package model.pokedex
import org.scalatest.{FlatSpec, Matchers}
/**
* Created by salim on 12/09/2016.
*/
class MonsterSpec extends FlatSpec with Matchers {
}
| salimfadhley/scalamoo | src/test/scala/model/pokedex/MonsterSpec.scala | Scala | mit | 165 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io.{Externalizable, ObjectInput, ObjectOutput}
import java.util.concurrent.Semaphore
import scala.collection.JavaConverters._
import scala.collection.mutable
import org.mockito.Mockito
import org.scalatest.Matchers
import org.apache.spark._
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Network.RPC_MESSAGE_MAX_SIZE
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.util.{ResetSystemProperties, RpcUtils}
class SparkListenerSuite extends SparkFunSuite with LocalSparkContext with Matchers
with ResetSystemProperties {
import LiveListenerBus._
val jobCompletionTime = 1421191296660L
private val mockSparkContext: SparkContext = Mockito.mock(classOf[SparkContext])
private val mockMetricsSystem: MetricsSystem = Mockito.mock(classOf[MetricsSystem])
private def numDroppedEvents(bus: LiveListenerBus): Long = {
bus.metrics.metricRegistry.counter(s"queue.$SHARED_QUEUE.numDroppedEvents").getCount
}
private def sharedQueueSize(bus: LiveListenerBus): Int = {
bus.metrics.metricRegistry.getGauges().get(s"queue.$SHARED_QUEUE.size").getValue()
.asInstanceOf[Int]
}
private def eventProcessingTimeCount(bus: LiveListenerBus): Long = {
bus.metrics.metricRegistry.timer(s"queue.$SHARED_QUEUE.listenerProcessingTime").getCount()
}
test("don't call sc.stop in listener") {
sc = new SparkContext("local", "SparkListenerSuite", new SparkConf())
val listener = new SparkContextStoppingListener(sc)
sc.listenerBus.addToSharedQueue(listener)
sc.listenerBus.post(SparkListenerJobEnd(0, jobCompletionTime, JobSucceeded))
sc.listenerBus.waitUntilEmpty()
sc.stop()
assert(listener.sparkExSeen)
}
test("basic creation and shutdown of LiveListenerBus") {
val conf = new SparkConf()
val counter = new BasicJobCounter
val bus = new LiveListenerBus(conf)
// Metrics are initially empty.
assert(bus.metrics.numEventsPosted.getCount === 0)
assert(numDroppedEvents(bus) === 0)
assert(bus.queuedEvents.size === 0)
assert(eventProcessingTimeCount(bus) === 0)
// Post five events:
(1 to 5).foreach { _ => bus.post(SparkListenerJobEnd(0, jobCompletionTime, JobSucceeded)) }
// Five messages should be marked as received and queued, but no messages should be posted to
    // listeners yet because the listener bus hasn't been started.
assert(bus.metrics.numEventsPosted.getCount === 5)
assert(bus.queuedEvents.size === 5)
// Add the counter to the bus after messages have been queued for later delivery.
bus.addToSharedQueue(counter)
assert(counter.count === 0)
// Starting listener bus should flush all buffered events
bus.start(mockSparkContext, mockMetricsSystem)
Mockito.verify(mockMetricsSystem).registerSource(bus.metrics)
bus.waitUntilEmpty()
assert(counter.count === 5)
assert(sharedQueueSize(bus) === 0)
assert(eventProcessingTimeCount(bus) === 5)
// After the bus is started, there should be no more queued events.
assert(bus.queuedEvents === null)
// After listener bus has stopped, posting events should not increment counter
bus.stop()
(1 to 5).foreach { _ => bus.post(SparkListenerJobEnd(0, jobCompletionTime, JobSucceeded)) }
assert(counter.count === 5)
assert(eventProcessingTimeCount(bus) === 5)
// Listener bus must not be started twice
intercept[IllegalStateException] {
val bus = new LiveListenerBus(conf)
bus.start(mockSparkContext, mockMetricsSystem)
bus.start(mockSparkContext, mockMetricsSystem)
}
// ... or stopped before starting
intercept[IllegalStateException] {
val bus = new LiveListenerBus(conf)
bus.stop()
}
}
test("bus.stop() waits for the event queue to completely drain") {
@volatile var drained = false
// When Listener has started
val listenerStarted = new Semaphore(0)
// Tells the listener to stop blocking
val listenerWait = new Semaphore(0)
// When stopper has started
val stopperStarted = new Semaphore(0)
// When stopper has returned
val stopperReturned = new Semaphore(0)
class BlockingListener extends SparkListener {
override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
listenerStarted.release()
listenerWait.acquire()
drained = true
}
}
val bus = new LiveListenerBus(new SparkConf())
val blockingListener = new BlockingListener
bus.addToSharedQueue(blockingListener)
bus.start(mockSparkContext, mockMetricsSystem)
bus.post(SparkListenerJobEnd(0, jobCompletionTime, JobSucceeded))
listenerStarted.acquire()
// Listener should be blocked after start
assert(!drained)
new Thread("ListenerBusStopper") {
override def run(): Unit = {
stopperStarted.release()
// stop() will block until notify() is called below
bus.stop()
stopperReturned.release()
}
}.start()
stopperStarted.acquire()
// Listener should remain blocked after stopper started
assert(!drained)
// unblock Listener to let queue drain
listenerWait.release()
stopperReturned.acquire()
assert(drained)
}
test("metrics for dropped listener events") {
val bus = new LiveListenerBus(new SparkConf().set(LISTENER_BUS_EVENT_QUEUE_CAPACITY, 1))
val listenerStarted = new Semaphore(0)
val listenerWait = new Semaphore(0)
bus.addToSharedQueue(new SparkListener {
override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
listenerStarted.release()
listenerWait.acquire()
}
})
bus.start(mockSparkContext, mockMetricsSystem)
// Post a message to the listener bus and wait for processing to begin:
bus.post(SparkListenerJobEnd(0, jobCompletionTime, JobSucceeded))
listenerStarted.acquire()
assert(sharedQueueSize(bus) === 0)
assert(numDroppedEvents(bus) === 0)
// If we post an additional message then it should remain in the queue because the listener is
// busy processing the first event:
bus.post(SparkListenerJobEnd(0, jobCompletionTime, JobSucceeded))
assert(sharedQueueSize(bus) === 1)
assert(numDroppedEvents(bus) === 0)
// The queue is now full, so any additional events posted to the listener will be dropped:
bus.post(SparkListenerJobEnd(0, jobCompletionTime, JobSucceeded))
assert(sharedQueueSize(bus) === 1)
assert(numDroppedEvents(bus) === 1)
    // Allow the remaining events to be processed so we can stop the listener bus:
listenerWait.release(2)
bus.stop()
}
test("basic creation of StageInfo") {
sc = new SparkContext("local", "SparkListenerSuite")
val listener = new SaveStageAndTaskInfo
sc.addSparkListener(listener)
val rdd1 = sc.parallelize(1 to 100, 4)
val rdd2 = rdd1.map(_.toString)
rdd2.setName("Target RDD")
rdd2.count()
sc.listenerBus.waitUntilEmpty()
listener.stageInfos.size should be {1}
val (stageInfo, taskInfoMetrics) = listener.stageInfos.head
stageInfo.rddInfos.size should be {2}
stageInfo.rddInfos.forall(_.numPartitions == 4) should be {true}
stageInfo.rddInfos.exists(_.name == "Target RDD") should be {true}
stageInfo.numTasks should be {4}
stageInfo.submissionTime should be (Symbol("defined"))
stageInfo.completionTime should be (Symbol("defined"))
taskInfoMetrics.length should be {4}
}
test("basic creation of StageInfo with shuffle") {
sc = new SparkContext("local", "SparkListenerSuite")
val listener = new SaveStageAndTaskInfo
sc.addSparkListener(listener)
val rdd1 = sc.parallelize(1 to 100, 4)
val rdd2 = rdd1.filter(_ % 2 == 0).map(i => (i, i))
val rdd3 = rdd2.reduceByKey(_ + _)
rdd1.setName("Un")
rdd2.setName("Deux")
rdd3.setName("Trois")
rdd1.count()
sc.listenerBus.waitUntilEmpty()
listener.stageInfos.size should be {1}
val stageInfo1 = listener.stageInfos.keys.find(_.stageId == 0).get
stageInfo1.rddInfos.size should be {1} // ParallelCollectionRDD
stageInfo1.rddInfos.forall(_.numPartitions == 4) should be {true}
stageInfo1.rddInfos.exists(_.name == "Un") should be {true}
listener.stageInfos.clear()
rdd2.count()
sc.listenerBus.waitUntilEmpty()
listener.stageInfos.size should be {1}
val stageInfo2 = listener.stageInfos.keys.find(_.stageId == 1).get
stageInfo2.rddInfos.size should be {3}
stageInfo2.rddInfos.forall(_.numPartitions == 4) should be {true}
stageInfo2.rddInfos.exists(_.name == "Deux") should be {true}
listener.stageInfos.clear()
rdd3.count()
sc.listenerBus.waitUntilEmpty()
listener.stageInfos.size should be {2} // Shuffle map stage + result stage
val stageInfo3 = listener.stageInfos.keys.find(_.stageId == 3).get
stageInfo3.rddInfos.size should be {1} // ShuffledRDD
stageInfo3.rddInfos.forall(_.numPartitions == 4) should be {true}
stageInfo3.rddInfos.exists(_.name == "Trois") should be {true}
}
test("StageInfo with fewer tasks than partitions") {
sc = new SparkContext("local", "SparkListenerSuite")
val listener = new SaveStageAndTaskInfo
sc.addSparkListener(listener)
val rdd1 = sc.parallelize(1 to 100, 4)
val rdd2 = rdd1.map(_.toString)
sc.runJob(rdd2, (items: Iterator[String]) => items.size, Seq(0, 1))
sc.listenerBus.waitUntilEmpty()
listener.stageInfos.size should be {1}
val (stageInfo, _) = listener.stageInfos.head
stageInfo.numTasks should be {2}
stageInfo.rddInfos.size should be {2}
stageInfo.rddInfos.forall(_.numPartitions == 4) should be {true}
}
test("local metrics") {
sc = new SparkContext("local", "SparkListenerSuite")
val listener = new SaveStageAndTaskInfo
sc.addSparkListener(listener)
sc.addSparkListener(new StatsReportListener)
// just to make sure some of the tasks and their deserialization take a noticeable
// amount of time
val slowDeserializable = new SlowDeserializable
val w = { i: Int =>
if (i == 0) {
Thread.sleep(100)
slowDeserializable.use()
}
i
}
val numSlices = 16
val d = sc.parallelize(0 to 10000, numSlices).map(w)
d.count()
sc.listenerBus.waitUntilEmpty()
listener.stageInfos.size should be (1)
val d2 = d.map { i => w(i) -> i * 2 }.setName("shuffle input 1")
val d3 = d.map { i => w(i) -> (0 to (i % 5)) }.setName("shuffle input 2")
val d4 = d2.cogroup(d3, numSlices).map { case (k, (v1, v2)) =>
(w(k), (v1.size, v2.size))
}
d4.setName("A Cogroup")
d4.collectAsMap()
sc.listenerBus.waitUntilEmpty()
listener.stageInfos.size should be (4)
listener.stageInfos.foreach { case (stageInfo, taskInfoMetrics) =>
/**
* Small test, so some tasks might take less than 1 millisecond, but average should be greater
* than 0 ms.
*/
checkNonZeroAvg(
taskInfoMetrics.map(_._2.executorRunTime),
stageInfo + " executorRunTime")
checkNonZeroAvg(
taskInfoMetrics.map(_._2.executorDeserializeTime),
stageInfo + " executorDeserializeTime")
/* Test is disabled (SEE SPARK-2208)
if (stageInfo.rddInfos.exists(_.name == d4.name)) {
checkNonZeroAvg(
taskInfoMetrics.map(_._2.shuffleReadMetrics.get.fetchWaitTime),
stageInfo + " fetchWaitTime")
}
*/
taskInfoMetrics.foreach { case (taskInfo, taskMetrics) =>
taskMetrics.resultSize should be > (0L)
if (stageInfo.rddInfos.exists(info => info.name == d2.name || info.name == d3.name)) {
assert(taskMetrics.shuffleWriteMetrics.bytesWritten > 0L)
}
if (stageInfo.rddInfos.exists(_.name == d4.name)) {
assert(taskMetrics.shuffleReadMetrics.totalBlocksFetched == 2 * numSlices)
assert(taskMetrics.shuffleReadMetrics.localBlocksFetched == 2 * numSlices)
assert(taskMetrics.shuffleReadMetrics.remoteBlocksFetched == 0)
assert(taskMetrics.shuffleReadMetrics.remoteBytesRead == 0L)
}
}
}
}
test("onTaskGettingResult() called when result fetched remotely") {
val conf = new SparkConf().set(RPC_MESSAGE_MAX_SIZE, 1)
sc = new SparkContext("local", "SparkListenerSuite", conf)
val listener = new SaveTaskEvents
sc.addSparkListener(listener)
// Make a task whose result is larger than the RPC message size
val maxRpcMessageSize = RpcUtils.maxMessageSizeBytes(conf)
assert(maxRpcMessageSize === 1024 * 1024)
val result = sc.parallelize(Seq(1), 1)
.map { x => 1.to(maxRpcMessageSize).toArray }
.reduce { case (x, y) => x }
assert(result === 1.to(maxRpcMessageSize).toArray)
sc.listenerBus.waitUntilEmpty()
val TASK_INDEX = 0
assert(listener.startedTasks.contains(TASK_INDEX))
assert(listener.startedGettingResultTasks.contains(TASK_INDEX))
assert(listener.endedTasks.contains(TASK_INDEX))
}
test("onTaskGettingResult() not called when result sent directly") {
sc = new SparkContext("local", "SparkListenerSuite")
val listener = new SaveTaskEvents
sc.addSparkListener(listener)
// Make a task whose result is larger than the RPC message size
val result = sc.parallelize(Seq(1), 1).map(2 * _).reduce { case (x, y) => x }
assert(result === 2)
sc.listenerBus.waitUntilEmpty()
val TASK_INDEX = 0
assert(listener.startedTasks.contains(TASK_INDEX))
assert(listener.startedGettingResultTasks.isEmpty)
assert(listener.endedTasks.contains(TASK_INDEX))
}
test("onTaskEnd() should be called for all started tasks, even after job has been killed") {
sc = new SparkContext("local", "SparkListenerSuite")
val WAIT_TIMEOUT_MILLIS = 10000
val listener = new SaveTaskEvents
sc.addSparkListener(listener)
val numTasks = 10
val f = sc.parallelize(1 to 10000, numTasks).map { i => Thread.sleep(10); i }.countAsync()
// Wait until one task has started (because we want to make sure that any tasks that are started
// have corresponding end events sent to the listener).
var finishTime = System.currentTimeMillis + WAIT_TIMEOUT_MILLIS
listener.synchronized {
var remainingWait = finishTime - System.currentTimeMillis
while (listener.startedTasks.isEmpty && remainingWait > 0) {
listener.wait(remainingWait)
remainingWait = finishTime - System.currentTimeMillis
}
assert(!listener.startedTasks.isEmpty)
}
f.cancel()
// Ensure that onTaskEnd is called for all started tasks.
finishTime = System.currentTimeMillis + WAIT_TIMEOUT_MILLIS
listener.synchronized {
var remainingWait = finishTime - System.currentTimeMillis
while (listener.endedTasks.size < listener.startedTasks.size && remainingWait > 0) {
listener.wait(finishTime - System.currentTimeMillis)
remainingWait = finishTime - System.currentTimeMillis
}
assert(listener.endedTasks.size === listener.startedTasks.size)
}
}
test("SparkListener moves on if a listener throws an exception") {
val badListener = new BadListener
val jobCounter1 = new BasicJobCounter
val jobCounter2 = new BasicJobCounter
val bus = new LiveListenerBus(new SparkConf())
// Propagate events to bad listener first
bus.addToSharedQueue(badListener)
bus.addToSharedQueue(jobCounter1)
bus.addToSharedQueue(jobCounter2)
bus.start(mockSparkContext, mockMetricsSystem)
// Post events to all listeners, and wait until the queue is drained
(1 to 5).foreach { _ => bus.post(SparkListenerJobEnd(0, jobCompletionTime, JobSucceeded)) }
bus.waitUntilEmpty()
// The exception should be caught, and the event should be propagated to other listeners
assert(jobCounter1.count === 5)
assert(jobCounter2.count === 5)
}
test("registering listeners via spark.extraListeners") {
val listeners = Seq(
classOf[ListenerThatAcceptsSparkConf],
classOf[FirehoseListenerThatAcceptsSparkConf],
classOf[BasicJobCounter])
val conf = new SparkConf().setMaster("local").setAppName("test")
.set(EXTRA_LISTENERS, listeners.map(_.getName))
sc = new SparkContext(conf)
sc.listenerBus.listeners.asScala.count(_.isInstanceOf[BasicJobCounter]) should be (1)
sc.listenerBus.listeners.asScala
.count(_.isInstanceOf[ListenerThatAcceptsSparkConf]) should be (1)
sc.listenerBus.listeners.asScala
.count(_.isInstanceOf[FirehoseListenerThatAcceptsSparkConf]) should be (1)
}
test("add and remove listeners to/from LiveListenerBus queues") {
val bus = new LiveListenerBus(new SparkConf(false))
val counter1 = new BasicJobCounter()
val counter2 = new BasicJobCounter()
val counter3 = new BasicJobCounter()
bus.addToSharedQueue(counter1)
bus.addToStatusQueue(counter2)
bus.addToStatusQueue(counter3)
assert(bus.activeQueues() === Set(SHARED_QUEUE, APP_STATUS_QUEUE))
assert(bus.findListenersByClass[BasicJobCounter]().size === 3)
bus.removeListener(counter1)
assert(bus.activeQueues() === Set(APP_STATUS_QUEUE))
assert(bus.findListenersByClass[BasicJobCounter]().size === 2)
bus.removeListener(counter2)
assert(bus.activeQueues() === Set(APP_STATUS_QUEUE))
assert(bus.findListenersByClass[BasicJobCounter]().size === 1)
bus.removeListener(counter3)
assert(bus.activeQueues().isEmpty)
assert(bus.findListenersByClass[BasicJobCounter]().isEmpty)
}
Seq(true, false).foreach { throwInterruptedException =>
val suffix = if (throwInterruptedException) "throw interrupt" else "set Thread interrupted"
test(s"interrupt within listener is handled correctly: $suffix") {
val conf = new SparkConf(false)
.set(LISTENER_BUS_EVENT_QUEUE_CAPACITY, 5)
val bus = new LiveListenerBus(conf)
val counter1 = new BasicJobCounter()
val counter2 = new BasicJobCounter()
val interruptingListener1 = new InterruptingListener(throwInterruptedException)
val interruptingListener2 = new InterruptingListener(throwInterruptedException)
bus.addToSharedQueue(counter1)
bus.addToSharedQueue(interruptingListener1)
bus.addToStatusQueue(counter2)
bus.addToEventLogQueue(interruptingListener2)
assert(bus.activeQueues() === Set(SHARED_QUEUE, APP_STATUS_QUEUE, EVENT_LOG_QUEUE))
assert(bus.findListenersByClass[BasicJobCounter]().size === 2)
assert(bus.findListenersByClass[InterruptingListener]().size === 2)
bus.start(mockSparkContext, mockMetricsSystem)
// after we post one event, both interrupting listeners should get removed, and the
// event log queue should be removed
bus.post(SparkListenerJobEnd(0, jobCompletionTime, JobSucceeded))
bus.waitUntilEmpty()
assert(bus.activeQueues() === Set(SHARED_QUEUE, APP_STATUS_QUEUE))
assert(bus.findListenersByClass[BasicJobCounter]().size === 2)
assert(bus.findListenersByClass[InterruptingListener]().size === 0)
assert(counter1.count === 1)
assert(counter2.count === 1)
      // posting more events should be fine; they'll just get processed by the remaining healthy queues
(0 until 5).foreach { _ => bus.post(SparkListenerJobEnd(0, jobCompletionTime, JobSucceeded)) }
bus.waitUntilEmpty()
assert(counter1.count === 6)
assert(counter2.count === 6)
// Make sure stopping works -- this requires putting a poison pill in all active queues, which
// would fail if our interrupted queue was still active, as its queue would be full.
bus.stop()
}
}
test("event queue size can be configued through spark conf") {
// configure the shared queue size to be 1, event log queue size to be 2,
// and listner bus event queue size to be 5
val conf = new SparkConf(false)
.set(LISTENER_BUS_EVENT_QUEUE_CAPACITY, 5)
.set(s"spark.scheduler.listenerbus.eventqueue.${SHARED_QUEUE}.capacity", "1")
.set(s"spark.scheduler.listenerbus.eventqueue.${EVENT_LOG_QUEUE}.capacity", "2")
val bus = new LiveListenerBus(conf)
val counter1 = new BasicJobCounter()
val counter2 = new BasicJobCounter()
val counter3 = new BasicJobCounter()
// add a new shared, status and event queue
bus.addToSharedQueue(counter1)
bus.addToStatusQueue(counter2)
bus.addToEventLogQueue(counter3)
assert(bus.activeQueues() === Set(SHARED_QUEUE, APP_STATUS_QUEUE, EVENT_LOG_QUEUE))
// check the size of shared queue is 1 as configured
assert(bus.getQueueCapacity(SHARED_QUEUE) == Some(1))
    // no specific size is configured for the status queue,
    // so it should use the LISTENER_BUS_EVENT_QUEUE_CAPACITY
    assert(bus.getQueueCapacity(APP_STATUS_QUEUE) == Some(5))
    // check the size of event log queue is 2 as configured
    assert(bus.getQueueCapacity(EVENT_LOG_QUEUE) == Some(2))
}
/**
* Assert that the given list of numbers has an average that is greater than zero.
*/
private def checkNonZeroAvg(m: Iterable[Long], msg: String): Unit = {
assert(m.sum / m.size.toDouble > 0.0, msg)
}
/**
* A simple listener that saves all task infos and task metrics.
*/
private class SaveStageAndTaskInfo extends SparkListener {
val stageInfos = mutable.Map[StageInfo, Seq[(TaskInfo, TaskMetrics)]]()
var taskInfoMetrics = mutable.Buffer[(TaskInfo, TaskMetrics)]()
override def onTaskEnd(task: SparkListenerTaskEnd): Unit = {
val info = task.taskInfo
val metrics = task.taskMetrics
if (info != null && metrics != null) {
taskInfoMetrics += ((info, metrics))
}
}
override def onStageCompleted(stage: SparkListenerStageCompleted): Unit = {
stageInfos(stage.stageInfo) = taskInfoMetrics
taskInfoMetrics = mutable.Buffer.empty
}
}
/**
* A simple listener that saves the task indices for all task events.
*/
private class SaveTaskEvents extends SparkListener {
val startedTasks = new mutable.HashSet[Int]()
val startedGettingResultTasks = new mutable.HashSet[Int]()
val endedTasks = new mutable.HashSet[Int]()
override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = synchronized {
startedTasks += taskStart.taskInfo.index
notify()
}
override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = synchronized {
endedTasks += taskEnd.taskInfo.index
notify()
}
override def onTaskGettingResult(taskGettingResult: SparkListenerTaskGettingResult): Unit = {
startedGettingResultTasks += taskGettingResult.taskInfo.index
}
}
/**
* A simple listener that throws an exception on job end.
*/
private class BadListener extends SparkListener {
override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = { throw new Exception }
}
/**
* A simple listener that interrupts on job end.
*/
private class InterruptingListener(val throwInterruptedException: Boolean) extends SparkListener {
override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
if (throwInterruptedException) {
throw new InterruptedException("got interrupted")
} else {
Thread.currentThread().interrupt()
}
}
}
}
// These classes can't be declared inside of the SparkListenerSuite class because we don't want
// their constructors to contain references to SparkListenerSuite:
/**
* A simple listener that counts the number of jobs observed.
*/
private class BasicJobCounter extends SparkListener {
var count = 0
override def onJobEnd(job: SparkListenerJobEnd): Unit = count += 1
}
/**
* A simple listener that tries to stop SparkContext.
*/
private class SparkContextStoppingListener(val sc: SparkContext) extends SparkListener {
@volatile var sparkExSeen = false
override def onJobEnd(job: SparkListenerJobEnd): Unit = {
try {
sc.stop()
} catch {
case se: SparkException =>
sparkExSeen = true
}
}
}
private class ListenerThatAcceptsSparkConf(conf: SparkConf) extends SparkListener {
var count = 0
override def onJobEnd(job: SparkListenerJobEnd): Unit = count += 1
}
private class FirehoseListenerThatAcceptsSparkConf(conf: SparkConf) extends SparkFirehoseListener {
var count = 0
override def onEvent(event: SparkListenerEvent): Unit = event match {
case job: SparkListenerJobEnd => count += 1
case _ =>
}
}
private class SlowDeserializable extends Externalizable {
override def writeExternal(out: ObjectOutput): Unit = { }
override def readExternal(in: ObjectInput): Unit = Thread.sleep(1)
def use(): Unit = { }
}
|
caneGuy/spark
|
core/src/test/scala/org/apache/spark/scheduler/SparkListenerSuite.scala
|
Scala
|
apache-2.0
| 25,500 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.httpclient
import akka.actor.ActorSystem
import akka.http.scaladsl.settings.ConnectionPoolSettings
import com.typesafe.config.ConfigFactory
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.squbs.resolver._
import org.squbs.unicomplex.JMX
import org.squbs.util.ConfigUtil._
import java.lang.management.ManagementFactory
import javax.management.ObjectName
import scala.concurrent.duration.Duration
object ClientConfigurationSpec {
val defaultConfig = ConfigFactory.load()
val appConfig = ConfigFactory.parseString(
s"""
|squbs {
| ${JMX.prefixConfig} = true
|}
|
|sampleClient {
| type = squbs.httpclient
|
| akka.http {
| host-connection-pool {
| max-connections = 987
| max-retries = 123
|
| client = {
| connecting-timeout = 123 ms
| }
| }
| }
|}
|
|sampleClient2 {
| type = squbs.httpclient
|
| akka.http.host-connection-pool {
| max-connections = 666
| }
|}
|
|noOverrides {
| type = squbs.httpclient
|}
|
|noType {
|
| akka.http.host-connection-pool {
| max-connections = 987
| max-retries = 123
| }
|}
|
|passedAsParameter {
| type = squbs.httpclient
|
| akka.http.host-connection-pool {
| max-connections = 111
| }
|}
|
|resolverConfig {
| type = squbs.httpclient
| akka.http.host-connection-pool {
| max-connections = 111
| }
|}
""".stripMargin)
val resolverConfig = ConfigFactory.parseString(
"""
|akka.http.host-connection-pool {
| max-connections = 987
| max-retries = 123
|}
""".stripMargin)
implicit val system = ActorSystem("ClientConfigurationSpec", appConfig.withFallback(defaultConfig))
ResolverRegistry(system).register[HttpEndpoint]("LocalhostEndpointResolver") { (name, _) =>
name match {
case "resolverConfig" => Some(HttpEndpoint(s"http://localhost:1234", None, Some(resolverConfig)))
case _ => Some(HttpEndpoint(s"http://localhost:1234"))
}
}
trait TypeConverter[T] {
def convert(src: Any): T
}
implicit val durationConverter = new TypeConverter[Duration] {
override def convert(src: Any): Duration = src match {
case d: Duration => d
case x => Duration(x.toString)
}
}
implicit val stringConverter = new TypeConverter[String] {
override def convert(src: Any): String = src.toString
}
implicit val intConverter = new TypeConverter[Int] {
override def convert(src: Any):Int = src match {
case i: Int => i
case x => x.toString.toInt
}
}
}
class ClientConfigurationSpec extends AnyFlatSpec with Matchers {
import ClientConfigurationSpec._
it should "give priority to client specific configuration" in {
ClientFlow("sampleClient")
assertJmxValue("sampleClient", "MaxConnections",
appConfig.getInt("sampleClient.akka.http.host-connection-pool.max-connections"))
assertJmxValue("sampleClient", "MaxRetries",
appConfig.getInt("sampleClient.akka.http.host-connection-pool.max-retries"))
assertJmxValue("sampleClient", "ConnectionPoolIdleTimeout",
defaultConfig.get[Duration]("akka.http.host-connection-pool.idle-timeout"))
assertJmxValue("sampleClient", "ConnectingTimeout",
appConfig.get[Duration]("sampleClient.akka.http.host-connection-pool.client.connecting-timeout"))
}
it should "fallback to default values if no client specific configuration is provided" in {
ClientFlow("noSpecificConfiguration")
assertDefaults("noSpecificConfiguration")
}
it should "fallback to default values if client configuration does not override any properties" in {
ClientFlow("noOverrides")
assertDefaults("noOverrides")
}
it should "fallback to resolver config first then default values if client configuration is missing the property" in {
ClientFlow("resolverConfig")
assertJmxValue("resolverConfig", "MaxConnections", 111)
assertJmxValue("resolverConfig", "MaxRetries", 123)
assertJmxValue("resolverConfig", "ConnectionPoolIdleTimeout",
defaultConfig.get[Duration]("akka.http.host-connection-pool.idle-timeout"))
}
it should "ignore client specific configuration if type is not set to squbs.httpclient" in {
ClientFlow("noType")
assertDefaults("noType")
}
it should "let configuring multiple clients" in {
ClientFlow("sampleClient2")
assertJmxValue("sampleClient", "MaxConnections",
appConfig.getInt("sampleClient.akka.http.host-connection-pool.max-connections"))
assertJmxValue("sampleClient", "MaxRetries",
appConfig.getInt("sampleClient.akka.http.host-connection-pool.max-retries"))
assertJmxValue("sampleClient", "ConnectionPoolIdleTimeout",
defaultConfig.get[Duration]("akka.http.host-connection-pool.idle-timeout"))
assertJmxValue("sampleClient", "ConnectingTimeout",
appConfig.get[Duration]("sampleClient.akka.http.host-connection-pool.client.connecting-timeout"))
assertJmxValue("sampleClient2", "MaxConnections",
appConfig.getInt("sampleClient2.akka.http.host-connection-pool.max-connections"))
assertJmxValue("sampleClient2", "MaxRetries", defaultConfig.getInt("akka.http.host-connection-pool.max-retries"))
assertJmxValue("sampleClient2", "ConnectionPoolIdleTimeout",
defaultConfig.get[Duration]("akka.http.host-connection-pool.idle-timeout"))
}
it should "configure even if not present in conf file" in {
ClientFlow("notInConfig")
assertDefaults("notInConfig")
}
it should "give priority to passed in settings" in {
val MaxConnections = 8778
val cps = ConnectionPoolSettings(system.settings.config).withMaxConnections(MaxConnections)
ClientFlow("passedAsParameter", settings = Some(cps))
assertJmxValue("passedAsParameter", "MaxConnections", MaxConnections)
}
private def assertJmxValue[T: TypeConverter](clientName: String, key: String, expectedValue: T) = {
val oName = ObjectName.getInstance(
s"org.squbs.configuration.${system.name}:type=squbs.httpclient,name=${ObjectName.quote(clientName)}")
val actualValue = implicitly[TypeConverter[T]]
.convert(ManagementFactory.getPlatformMBeanServer.getAttribute(oName, key))
actualValue shouldEqual expectedValue
}
private def assertDefaults(clientName: String) = {
assertJmxValue(clientName, "MaxConnections", defaultConfig.getInt("akka.http.host-connection-pool.max-connections"))
assertJmxValue(clientName, "MaxRetries", defaultConfig.getInt("akka.http.host-connection-pool.max-retries"))
assertJmxValue(clientName, "ConnectionPoolIdleTimeout",
defaultConfig.get[Duration]("akka.http.host-connection-pool.idle-timeout"))
}
}
|
akara/squbs
|
squbs-httpclient/src/test/scala/org/squbs/httpclient/ClientConfigurationSpec.scala
|
Scala
|
apache-2.0
| 7,584 |
package im.actor.server.model.presences
import org.joda.time.DateTime
@SerialVersionUID(1L)
case class UserPresence(userId: Int, authId: Long, lastSeenAt: Option[DateTime])
|
EaglesoftZJ/actor-platform
|
actor-server/actor-models/src/main/scala/im/actor/server/model/presences/UserPresence.scala
|
Scala
|
agpl-3.0
| 175 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.kafka010
import java.{util => ju}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.kafka010.consumer.KafkaDataConsumer
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.NextIterator
/** Offset range that one partition of the KafkaSourceRDD has to read */
private[kafka010] case class KafkaSourceRDDOffsetRange(
topicPartition: TopicPartition,
fromOffset: Long,
untilOffset: Long,
preferredLoc: Option[String]) {
def topic: String = topicPartition.topic
def partition: Int = topicPartition.partition
def size: Long = untilOffset - fromOffset
}
/** Partition of the KafkaSourceRDD */
private[kafka010] case class KafkaSourceRDDPartition(
index: Int, offsetRange: KafkaSourceRDDOffsetRange) extends Partition
/**
* An RDD that reads data from Kafka based on offset ranges across multiple partitions.
* Additionally, it allows preferred locations to be set for each topic + partition, so that
* the [[KafkaSource]] can ensure the same executor always reads the same topic + partition
 * and cached KafkaConsumers (see [[KafkaDataConsumer]]) can be used to read data efficiently.
*
* @param sc the [[SparkContext]]
* @param executorKafkaParams Kafka configuration for creating KafkaConsumer on the executors
* @param offsetRanges Offset ranges that define the Kafka data belonging to this RDD
*/
private[kafka010] class KafkaSourceRDD(
sc: SparkContext,
executorKafkaParams: ju.Map[String, Object],
offsetRanges: Seq[KafkaSourceRDDOffsetRange],
pollTimeoutMs: Long,
failOnDataLoss: Boolean)
extends RDD[ConsumerRecord[Array[Byte], Array[Byte]]](sc, Nil) {
override def persist(newLevel: StorageLevel): this.type = {
logError("Kafka ConsumerRecord is not serializable. " +
"Use .map to extract fields before calling .persist or .window")
super.persist(newLevel)
}
override def getPartitions: Array[Partition] = {
offsetRanges.zipWithIndex.map { case (o, i) => new KafkaSourceRDDPartition(i, o) }.toArray
}
override def getPreferredLocations(split: Partition): Seq[String] = {
val part = split.asInstanceOf[KafkaSourceRDDPartition]
part.offsetRange.preferredLoc.map(Seq(_)).getOrElse(Seq.empty)
}
override def compute(
thePart: Partition,
context: TaskContext): Iterator[ConsumerRecord[Array[Byte], Array[Byte]]] = {
val sourcePartition = thePart.asInstanceOf[KafkaSourceRDDPartition]
val consumer = KafkaDataConsumer.acquire(
sourcePartition.offsetRange.topicPartition, executorKafkaParams)
val range = resolveRange(consumer, sourcePartition.offsetRange)
assert(
range.fromOffset <= range.untilOffset,
s"Beginning offset ${range.fromOffset} is after the ending offset ${range.untilOffset} " +
s"for topic ${range.topic} partition ${range.partition}. " +
"You either provided an invalid fromOffset, or the Kafka topic has been damaged")
if (range.fromOffset == range.untilOffset) {
logInfo(s"Beginning offset ${range.fromOffset} is the same as ending offset " +
s"skipping ${range.topic} ${range.partition}")
consumer.release()
Iterator.empty
} else {
val underlying = new NextIterator[ConsumerRecord[Array[Byte], Array[Byte]]]() {
var requestOffset = range.fromOffset
override def getNext(): ConsumerRecord[Array[Byte], Array[Byte]] = {
if (requestOffset >= range.untilOffset) {
// Processed all offsets in this partition.
finished = true
null
} else {
val r = consumer.get(requestOffset, range.untilOffset, pollTimeoutMs, failOnDataLoss)
if (r == null) {
// Losing some data. Skip the rest offsets in this partition.
finished = true
null
} else {
requestOffset = r.offset + 1
r
}
}
}
override protected def close(): Unit = {
consumer.release()
}
}
// Release consumer, either by removing it or indicating we're no longer using it
context.addTaskCompletionListener[Unit] { _ =>
underlying.closeIfNeeded()
}
underlying
}
}
private def resolveRange(consumer: KafkaDataConsumer, range: KafkaSourceRDDOffsetRange) = {
if (range.fromOffset < 0 || range.untilOffset < 0) {
// Late bind the offset range
val availableOffsetRange = consumer.getAvailableOffsetRange()
val fromOffset = if (range.fromOffset < 0) {
assert(range.fromOffset == KafkaOffsetRangeLimit.EARLIEST,
s"earliest offset ${range.fromOffset} does not equal ${KafkaOffsetRangeLimit.EARLIEST}")
availableOffsetRange.earliest
} else {
range.fromOffset
}
val untilOffset = if (range.untilOffset < 0) {
assert(range.untilOffset == KafkaOffsetRangeLimit.LATEST,
s"latest offset ${range.untilOffset} does not equal ${KafkaOffsetRangeLimit.LATEST}")
availableOffsetRange.latest
} else {
range.untilOffset
}
KafkaSourceRDDOffsetRange(range.topicPartition,
fromOffset, untilOffset, range.preferredLoc)
} else {
range
}
}
}
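// Hedged sketch, not part of the original file: how an explicit offset range compares to one
// using the EARLIEST/LATEST sentinels that resolveRange() above late-binds on the executor.
// The topic name, partition and offsets below are illustrative assumptions.
private[kafka010] object KafkaSourceRDDOffsetRangeSketch {
  private val tp = new TopicPartition("demo-topic", 0)
  // Fully specified range: its size (100 records here) is known up front.
  val explicit = KafkaSourceRDDOffsetRange(tp, fromOffset = 0L, untilOffset = 100L, preferredLoc = None)
  // Sentinel range: negative offsets are resolved against the broker when the task runs.
  val lateBound = KafkaSourceRDDOffsetRange(
    tp, KafkaOffsetRangeLimit.EARLIEST, KafkaOffsetRangeLimit.LATEST, preferredLoc = None)
}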
|
jkbradley/spark
|
external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaSourceRDD.scala
|
Scala
|
apache-2.0
| 6,235 |
package wallet
import org.springframework.context.annotation.Configuration
import org.springframework.boot.autoconfigure.EnableAutoConfiguration
import org.springframework.context.annotation.ComponentScan
import org.springframework.web.bind.annotation.RequestMapping
import org.springframework.web.bind.annotation.ResponseBody
import org.springframework.web.bind.annotation.RequestMethod
import org.springframework.web.bind.annotation.RequestParam
import org.springframework.web.bind.annotation.RestController
import scala.collection.JavaConversions._
import scala.reflect.{BeanProperty, BooleanBeanProperty}
import scala.util.Random
import java.util.Date
import org.apache.commons.lang3.time.DateFormatUtils
class IdCard{
// @BeanProperty
//var carduser_id: Int = _
@BeanProperty
var card_id: Int = _
@BeanProperty
var card_name: String = _
@BeanProperty
var card_number: String = _
@BeanProperty
var expiration_date: String = DateFormatUtils.ISO_DATETIME_TIME_ZONE_FORMAT.format(new Date())
}
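// Hedged sketch, not part of the original file: @BeanProperty on the vars above makes scalac
// emit Java-style accessors (e.g. getCard_id/setCard_id), which is what lets Spring bind and
// serialize this class reflectively. The sample values below are illustrative assumptions.
object IdCardSample {
  def sample(): IdCard = {
    val card = new IdCard
    card.setCard_id(1)
    card.setCard_name("Driver License")
    card.setCard_number("D123-4567")
    // expiration_date keeps its default (the ISO-formatted construction time) unless overridden
    card
  }
}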
|
meghanagogi/DigitalWallet
|
src/main/scala/wallet/IDCard.scala
|
Scala
|
mit
| 1,081 |
package json
import org.apache.spark._
import org.apache.spark.sql._
object SparkJSON
{
def main(args: Array[String])
{
// generate our Spark job's configuration
val conf = new SparkConf().setAppName("Spark JSON").setMaster("local[1]")
val spark = new SparkContext(conf)
// all data related commands occur through a SQLContext
val sql_context: SQLContext = new SQLContext(spark)
// a DataFrame is an abstraction; all data types are meant to be represented in a DataFrame
// essentially a DataFrame is a set of data with a schema
val data_frame: DataFrame = sql_context.read.json("data/people_spark.json")
// we use SQL like commands on DataFrames to do our work
data_frame.printSchema()
data_frame.show()
data_frame.select("address.city").show()
// we can also perform direct SQL queries
data_frame.registerTempTable("people")
sql_context.sql("SELECT * FROM people WHERE address.city = 'Baltimore'").show()
spark.stop()
}
}
|
chivesrs/spark-demo
|
src/main/scala/json/SparkJSON.scala
|
Scala
|
gpl-3.0
| 1,066 |
package net.shift.server.http
import java.net.URLDecoder
import java.nio.ByteBuffer
import scala.util.Try
import net.shift.common.{BinReader, LogBuilder, Path, ShiftParsers}
import net.shift.io.{IO, LocalFileSystem}
import net.shift.io.IO._
class HttpParser extends ShiftParsers {
private val log = LogBuilder.logger(classOf[HttpParser])
def uri: Parser[Uri] = ws ~> (opt((str("http://") ~> notReserved()) ~ opt(chr(':') ~> int)) ~ opt(path) ~ (ws ~> opt(chr('?') ~> params))) ^^ {
case Some(host ~ port) ~ path ~ params => Uri(Some(host), port, URLDecoder.decode(path getOrElse "/", "UTF-8"), params getOrElse Nil)
case None ~ path ~ params => Uri(None, None, URLDecoder.decode(path getOrElse "/", "UTF-8"), params getOrElse Nil)
}
def params: Parser[List[Param]] = repsep(notReserved() ~ opt(chr('=') ~> repsep(notReserved(), chr(','))), chr('&')) ^^ {
_ map {
case name ~ Some(value) => Param(URLDecoder.decode(name, "UTF-8"), value map { URLDecoder.decode(_, "UTF-8") })
case name ~ _ => Param(URLDecoder.decode(name, "UTF-8"), Nil)
}
}
def httpLine: Parser[(String, Uri, Ver)] = capitals ~ uri ~ (str("HTTP/") ~> digit) ~ (chr('.') ~> digit) <~ crlf ^^ {
case method ~ uri ~ major ~ minor =>
(method,
uri,
Ver(major, minor))
}
def cookie: Parser[List[Cookie]] = (str("Cookie") <~ chr(':') <~ ws) ~> repsep((ws ~> notReserved() <~ ws <~ chr('=') <~ ws) ~ notReserved('='), chr(';')) <~ crlf ^^ {
_ map {
case k ~ v => Cookie(k, v)
}
}
def header: Parser[List[TextHeader]] = ((notReserved() <~ chr(':') <~ ws) ~ until(crlf, retryPInput = false)) ^^ {
case name ~ value =>
List(TextHeader(name.trim, IO.bufferToString(value)))
}
def httpHeaders: Parser[Seq[HeaderItem]] = rep(cookie | header) ^^ { _ flatten }
def httpBody: Parser[Body] = until(atEnd, retryPInput = false) ^^ { a =>
Body(List(a))
}
def http: Parser[Request] = httpLine ~ httpHeaders ~ (crlf ~> opt(httpBody)) ^^ {
case (method, uri, ver) ~ headers ~ body =>
Request(method, uri, ver, headers, body getOrElse Body.empty)
}
def parse(reader: BinReader): Try[Request] = {
if (log.isDebug) {
val bufs = reader.in.map { _.duplicate }
log.debug("Parsing data " + (for { b <- bufs } yield {
b.toString
}).mkString("\\n"))
log.debug(IO.buffersToString(bufs))
}
http(reader) match {
case Success(r, _) => scala.util.Success(r)
case Failure(f, p) =>
log.debug("Failed at position: " + p.pos.column + " " + f)
scala.util.Failure(new Exception("Failed at position: " + p.pos.column + " " + f))
case Error(f, p) =>
log.debug("Error at position: " + p.pos.column + " " + f)
scala.util.Failure(new Exception("Error at position: " + p.pos.column + " " + f))
}
}
def parseParams(p: String): Try[List[Param]] = params(BinReader(List(ByteBuffer.wrap(p.getBytes("UTF-8"))))) match {
case Success(r, _) => scala.util.Success(r)
case Failure(f, _) =>
scala.util.Failure(new Exception(f))
case Error(f, _) =>
scala.util.Failure(new Exception(f))
}
def parse(http: String): Try[Request] = parse(BinReader(List(ByteBuffer.wrap(http.getBytes("UTF-8")))))
}
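// Hedged sketch, not part of the original file: exercising the string-based parse entry point
// defined above. The request line, header and cookie are illustrative; only the
// parse(http: String): Try[Request] signature from this file is assumed.
private object HttpParserSketch {
  def demo(): Unit = {
    val raw = "GET /items?lang=en HTTP/1.1\r\nHost: localhost\r\nCookie: session=abc\r\n\r\n"
    new HttpParser().parse(raw) match {
      case scala.util.Success(request) => println(s"parsed request: $request")
      case scala.util.Failure(error)   => println(s"parse failed: ${error.getMessage}")
    }
  }
}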
|
mariusdanciu/shift
|
shift-server/src/main/scala/net/shift/server/http/HttpParser.scala
|
Scala
|
apache-2.0
| 3,296 |
package com.googlecode.kanbanik.commands
import com.googlecode.kanbanik.builders.ProjectBuilder
import scala.collection.JavaConversions._
import com.googlecode.kanbanik.model.Board
import org.bson.types.ObjectId
import com.googlecode.kanbanik.dtos.{ErrorDto, ProjectDto}
class SaveProjectCommand extends Command[ProjectDto, ProjectDto] {
private lazy val projectBuilder = new ProjectBuilder()
def execute(params: ProjectDto): Either[ProjectDto, ErrorDto] = {
if (params.boardIds.isDefined) {
for (board <- params.boardIds.get) {
try {
Board.byId(new ObjectId(board), false)
} catch {
          case e: IllegalArgumentException =>
            // Abort early so the invalid board reference is reported back to the client
            return Right(ErrorDto("The board '" + board + "' to which this project is assigned does not exist. Possibly it has been deleted by a different user. Please refresh your browser to get the current data."))
}
}
}
val project = projectBuilder.buildEntity(params)
Left(projectBuilder.buildDto(project.store))
}
}
|
mortenpoulsen/kanbanik
|
kanbanik-server/src/main/scala/com/googlecode/kanbanik/commands/SaveProjectCommand.scala
|
Scala
|
apache-2.0
| 1,020 |
package io.dylemma.spac
import io.dylemma.spac.impl.{SplitterByConsecutiveMatches, SplitterByContextMatch, SplitterByInputMatch, SplitterJoiner}
/** Primary "spac" abstraction that acts as a selector for sub-streams within a single input stream.
*
* A "sub-stream" is some series of consecutive values from the original stream, identified by a "context" value.
* Sub-streams do not overlap with each other.
*
* For example, when handling a stream of XML events, you might want to create a Splitter that identifies
 * the events representing elements at a specific location within the XML; something like an XPath that operates on streams.
* When using `xml-spac`, you might construct a splitter like `Splitter.xml("rootElem" \\ "things" \\ "thing")`.
* This would identify a new sub-stream for each `<thing>` element that appears inside a `<things>` element, inside the `<rootElem>` element.
* An example sub-stream for a `<thing>` element might be `ElemStart("thing"), Text("hello"), ElemEnd("thing")`.
*
* A Splitter's general goal is to attach a Parser or Transformer to each sub-stream, passing the contents of that sub-stream
* through the attached Parser or Transformer in order to get an interpretation of that sub-stream (i.e. the Parser's result,
* or some emitted outputs from a Transformer).
* With the `<thing>` example above, you might attach a parser that concatenates the context all Text events it sees.
* I.e. `XmlParser.forText`. Since a separate parser handler will run for each sub-stream, this becomes something like
* "A stream of Strings which each represent the concatenated text from an individual `<thing>` element".
*
* @tparam In Data event type for the input stream
* @tparam C Context type used to identify each sub-stream
* @group primary
*/
trait Splitter[In, +C] {
/** Inject "boundary" events into an input stream, where a `ContextPush` represents the
* beginning of a new sub-stream, and a `ContextPop` represents the end of a sub-stream.
*
* @return A transformer that injects the boundary events into any given input stream
*/
def addBoundaries: Transformer[In, Either[ContextChange[In, C], In]]
/** Creates a new transformer by attaching a new parser to each sub-stream based on the sub-stream context.
* For each sub-stream, a new parser will be created, and inputs from the sub-stream will be piped into that parser.
* When the sub-stream ends, or if the parser finishes on its own, the parser's result will be emitted as an `Out` event.
*
* @param parseMatches Given the context for a sub-stream, return a parser to handle that sub-stream
* @tparam Out The parser's output type
* @return A transformer that will emit the result of each parsed sub-stream
*/
def map[Out](parseMatches: C => Parser[In, Out]): Transformer[In, Out] = mapTraced(push => parseMatches(push.context))
/** Like `map`, but using the `ContextPush` associated with the sub-stream, instead of just the context value itself.
*
   * @param parseMatches Given the `ContextPush` that started a sub-stream, return a parser to handle that sub-stream
   * @tparam Out The parser's output type
   * @return A transformer that will emit the result of each parsed sub-stream
*/
def mapTraced[Out](parseMatches: ContextPush[In, C] => Parser[In, Out]): Transformer[In, Out] = flatMap { push =>
parseMatches(push).asTransformer
}
/** Like `map`, but when you want to use the same parser for each sub-stream, regardless of the context value
*/
def joinBy[Out](parser: Parser[In, Out]): Transformer[In, Out] = mapTraced(new ConstFunction(parser))
/** Like `joinBy`, but the parser is passed implicitly
*/
def as[Out](implicit parser: Parser[In, Out]) = mapTraced(new ConstFunction(parser))
private class ConstFunction[A](result: A) extends (Any => A) {
def apply(v1: Any) = result
override def toString = result.toString
}
/** Creates a new transformer by attaching an "inner" transformer to each sub-stream based on the sub-stream context.
* For each sub-stream, a new transformer will be created, and the inputs from the sub-stream will be piped into the inner transformer.
* Anything that the inner transformer emits will be emitted by the returned transformer.
*/
def flatMap[Out](transformMatches: ContextPush[In, C] => Transformer[In, Out]): Transformer[In, Out] = addBoundaries through SplitterJoiner(transformMatches)
}
/**
* @group primary
*/
object Splitter {
/** Convenience for creating Splitters with a specific `In` type; useful when type inference can figure out the other type parameters. */
def apply[In] = new SplitterApplyWithBoundInput[In]
/** Create a splitter that keeps track of a "stack" which is pushed and popped by `In` events,
* starting a new substream when the given `matcher` matches the stack.
*
* The primary use-case for this is when dealing with nestable data formats like XML or JSON,
* where a token could signify a push to the stack (e.g. an ElemStart event), and where you
* want to operate on events that occur within some specific stack of elements.
*
* For inputs that cause a push or pop to the stack, whether that input is included as "inside"
* the pushed context is up to the specific `StackLike` implementation.
*/
def fromMatcher[In, Elem, C](matcher: ContextMatcher[Elem, C])(implicit S: StackLike[In, Elem], pos: CallerPos): Splitter[In, C] = new SplitterByContextMatch(matcher, pos)
/** Create a splitter that starts a new substream every time the `matcher` matches.
* Any events passed through before the initial match will be discarded, but every event
* thereafter will be part of a substream. The context for a substream is based on the
* value returned by the `matcher` for the event that caused that match.
*
* For example, in a stream like `4 3 2 1 2 3 1 2 1 2 3 4`, if our matcher was `{ case 1 => "yay" }`,
* then we'd have a new substream with context "yay" every time a `1` came through:
*
* - (new context: "yay") 1 2 3
* - (new context: "yay") 1 2
* - (new context: "yay") 1 2 3 4
*
* @param matcher A PartialFunction that can extract a context value from inputs
* @tparam In The input type
* @tparam C The extracted context type
* @return A splitter that starts a new substream for every input where `matcher.isDefinedAt(input)`,
* with a context equal to `matcher(input)`.
*/
def splitOnMatch[In, C](matcher: PartialFunction[In, C]): Splitter[In, C] = new SplitterByInputMatch(matcher)
/** Create a splitter that starts a new substream every time the predicate function `p` returns true for an input.
* Any inputs passed through before the initial match will be discarded, but every event thereafter will be part
* of a substream. Context is ignored for substreams from this method - the context type is `Any`.
*
* For example, in a stream like `4 3 2 1 2 3 1 2 1 2 3 4`, if our predicate was `{ _ == 1 }`,
* then we'd have a new substream starting from each `1` input.
*
* - (new context) 1 2 3
* - (new context) 1 2
* - (new context) 1 2 3 4
*
* @param f The predicate function responsible for determining if a new context should start for an input.
* @tparam In The input type
* @return A splitter that starts a new substream for every input where `p(input) == true`
*/
def splitOnMatch[In](f: In => Boolean): Splitter[In, Any] = splitOnMatch[In, Unit] { case in if f(in) => () }
/** Create a Splitter that treats consecutive matched values as substreams.
* For example, given a matcher like `{ case c if c.isLetter => c }`, a stream like
* {{{1 2 3 A B C 4 5 6 D 7 8 E F G H 9}}}
* could be treated as having three substreams, where each substream's "context value"
* is the first letter in that group (because context is always defined by the beginning
* of the substream).
*
* - `A B C` with context `'A'` (between the 3 and 4)
* - `D` with context `'D'` (between the 6 and 7)
* - `E F G H` with context `'E'` (between the 8 and 9)
*
* @param matcher A function defining which inputs count as a "match"
   * @tparam In The input type
   * @tparam Context The context type extracted by the matcher
   * @return A splitter that treats each run of consecutive matches as a substream, using the first match in the run as its context
*/
def consecutiveMatches[In, Context](matcher: PartialFunction[In, Context]): Splitter[In, Context] = new SplitterByConsecutiveMatches(matcher)
/** Create a Splitter that treats consecutive values matching the predicate `p` as
* substreams with no particular context value.
* For example, given a matcher like `i => i % 2 == 0`, a stream like
* {{{1 3 2 2 4 5 6 7 8 10 4 3 1}}}
* could be treated as having three substreams:
*
* - `2 2 4`
* - `6`
* - `8 10 4`
*
   * @param p The predicate that decides whether an input belongs to a run of matches
   * @tparam In The input type
   * @return A splitter that treats each run of consecutive matching inputs as a substream, with no meaningful context value
*/
def consecutiveMatches[In](p: In => Boolean): Splitter[In, Any] = consecutiveMatches[In, Any] { case in if p(in) => () }
}
/**
* @tparam In
* @group util
*/
class SplitterApplyWithBoundInput[In] {
/** See `Splitter.fromMatcher` */
def fromMatcher[Elem, C](matcher: ContextMatcher[Elem, C])(implicit S: StackLike[In, Elem], pos: CallerPos): Splitter[In, C] = Splitter.fromMatcher(matcher)
/** See `Splitter.splitOnMatch` */
def splitOnMatch[C](matcher: PartialFunction[In, C]): Splitter[In, C] = Splitter.splitOnMatch(matcher)
/** See `Splitter.splitOnMatch` */
def splitOnMatch(f: In => Boolean): Splitter[In, Any] = Splitter.splitOnMatch(f)
/** See `Splitter.consecutiveMatches` */
def consecutiveMatches[Context](matcher: PartialFunction[In, Context]): Splitter[In, Context] = Splitter.consecutiveMatches(matcher)
/** See `Splitter.consecutiveMatches` */
def consecutiveMatches(p: In => Boolean): Splitter[In, Any] = Splitter.consecutiveMatches(p)
}
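// Hedged sketch, not part of the original file: constructing splitters from the factory methods
// defined above. The values are only built, not run against a stream, so no concrete Parser or
// Transformer implementation is assumed; the names and sample element types are illustrative.
private object SplitterSketch {
  // A new sub-stream starts every time a 1 arrives, with context "one"
  val onOnes: Splitter[Int, String] = Splitter.splitOnMatch[Int, String] { case 1 => "one" }
  // Runs of consecutive even numbers become sub-streams, via the In-bound helper
  val evenRuns: Splitter[Int, Any] = Splitter[Int].consecutiveMatches((i: Int) => i % 2 == 0)
}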
|
dylemma/xml-stream
|
core/src/main/scala/io/dylemma/spac/Splitter.scala
|
Scala
|
mit
| 9,568 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster
import org.apache.spark.{Logging, SparkConf, SparkContext, SparkEnv}
import org.apache.spark.deploy.{ApplicationDescription, Command}
import org.apache.spark.deploy.client.{AppClient, AppClientListener}
import org.apache.spark.scheduler.{ExecutorExited, ExecutorLossReason, SlaveLost, TaskSchedulerImpl}
import org.apache.spark.util.{AkkaUtils, Utils}
private[spark] class SparkDeploySchedulerBackend(
scheduler: TaskSchedulerImpl,
sc: SparkContext,
masters: Array[String])
extends CoarseGrainedSchedulerBackend(scheduler, sc.env.actorSystem)
with AppClientListener
with Logging {
var client: AppClient = null
var stopping = false
var shutdownCallback : (SparkDeploySchedulerBackend) => Unit = _
@volatile var appId: String = _
val registrationLock = new Object()
var registrationDone = false
val maxCores = conf.getOption("spark.cores.max").map(_.toInt)
val totalExpectedCores = maxCores.getOrElse(0)
override def start() {
super.start()
// The endpoint for executors to talk to us
val driverUrl = AkkaUtils.address(
AkkaUtils.protocol(actorSystem),
SparkEnv.driverActorSystemName,
conf.get("spark.driver.host"),
conf.get("spark.driver.port"),
CoarseGrainedSchedulerBackend.ACTOR_NAME)
val args = Seq(
"--driver-url", driverUrl,
"--executor-id", "{{EXECUTOR_ID}}",
"--hostname", "{{HOSTNAME}}",
"--cores", "{{CORES}}",
"--app-id", "{{APP_ID}}",
"--worker-url", "{{WORKER_URL}}")
val extraJavaOpts = sc.conf.getOption("spark.executor.extraJavaOptions")
.map(Utils.splitCommandString).getOrElse(Seq.empty)
val classPathEntries = sc.conf.getOption("spark.executor.extraClassPath")
.map(_.split(java.io.File.pathSeparator).toSeq).getOrElse(Nil)
val libraryPathEntries = sc.conf.getOption("spark.executor.extraLibraryPath")
.map(_.split(java.io.File.pathSeparator).toSeq).getOrElse(Nil)
// When testing, expose the parent class path to the child. This is processed by
// compute-classpath.{cmd,sh} and makes all needed jars available to child processes
// when the assembly is built with the "*-provided" profiles enabled.
val testingClassPath =
if (sys.props.contains("spark.testing")) {
sys.props("java.class.path").split(java.io.File.pathSeparator).toSeq
} else {
Nil
}
// Start executors with a few necessary configs for registering with the scheduler
val sparkJavaOpts = Utils.sparkJavaOpts(conf, SparkConf.isExecutorStartupConf)
val javaOpts = sparkJavaOpts ++ extraJavaOpts
val command = Command("org.apache.spark.executor.CoarseGrainedExecutorBackend",
args, sc.executorEnvs, classPathEntries ++ testingClassPath, libraryPathEntries, javaOpts)
val appUIAddress = sc.ui.map(_.appUIAddress).getOrElse("")
val appDesc = new ApplicationDescription(sc.appName, maxCores, sc.executorMemory, command,
appUIAddress, sc.eventLogDir, sc.eventLogCodec)
client = new AppClient(sc.env.actorSystem, masters, appDesc, this, conf)
client.start()
waitForRegistration()
}
override def stop() {
stopping = true
super.stop()
client.stop()
if (shutdownCallback != null) {
shutdownCallback(this)
}
}
override def connected(appId: String) {
logInfo("Connected to Spark cluster with app ID " + appId)
this.appId = appId
notifyContext()
}
override def disconnected() {
notifyContext()
if (!stopping) {
logWarning("Disconnected from Spark cluster! Waiting for reconnection...")
}
}
override def dead(reason: String) {
notifyContext()
if (!stopping) {
logError("Application has been killed. Reason: " + reason)
scheduler.error(reason)
// Ensure the application terminates, as we can no longer run jobs.
sc.stop()
}
}
override def executorAdded(fullId: String, workerId: String, hostPort: String, cores: Int,
memory: Int) {
logInfo("Granted executor ID %s on hostPort %s with %d cores, %s RAM".format(
fullId, hostPort, cores, Utils.megabytesToString(memory)))
}
override def executorRemoved(fullId: String, message: String, exitStatus: Option[Int]) {
val reason: ExecutorLossReason = exitStatus match {
case Some(code) => ExecutorExited(code)
case None => SlaveLost(message)
}
logInfo("Executor %s removed: %s".format(fullId, message))
removeExecutor(fullId.split("/")(1), reason.toString)
}
override def sufficientResourcesRegistered(): Boolean = {
totalCoreCount.get() >= totalExpectedCores * minRegisteredRatio
}
override def applicationId(): String =
Option(appId).getOrElse {
logWarning("Application ID is not initialized yet.")
super.applicationId
}
private def waitForRegistration() = {
registrationLock.synchronized {
while (!registrationDone) {
registrationLock.wait()
}
}
}
private def notifyContext() = {
registrationLock.synchronized {
registrationDone = true
registrationLock.notifyAll()
}
}
}
|
hengyicai/OnlineAggregationUCAS
|
core/src/main/scala/org/apache/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala
|
Scala
|
apache-2.0
| 5,943 |
package net.selenate.server
import akka.util.Timeout
import com.typesafe.config.{ Config, ConfigFactory, ConfigParseOptions, ConfigSyntax }
import java.io.File
import net.selenate.common.exceptions.SeException
import scala.concurrent.duration.{ Duration, FiniteDuration }
trait CBase {
val configOverride = sys.props.get("Selenate.config_override")
val branch = sys.props.get("Selenate.branch")
val userHome = sys.props.get("user.home").getOrElse(throw new SeException("""Key "user.home" not defined in system properties!"""))
val configPath = {
(configOverride, branch) match {
case (Some(c), _) => new File(c)
case (None, Some(b)) => new File(userHome + s"/.config/selenate_$b/")
case (None, None) => new File(userHome + s"/.config/selenate/")
}
}
def parseOpts(allowMissing: Boolean) = ConfigParseOptions
.defaults()
.setAllowMissing(allowMissing)
.setSyntax(ConfigSyntax.CONF);
def loadResourceConfig(resource: String): Config =
ConfigFactory.parseResources(resource, parseOpts(false))
def loadFileConfig(path: File): Config =
ConfigFactory.parseFile(path, parseOpts(true))
protected def parseTimeout(raw: String): Timeout = {
val duration = Duration(raw);
duration match {
case fd: FiniteDuration => new Timeout(fd)
case _ => throw new IllegalArgumentException(s"Given duration $raw cannot be interpreted as a finite duration!")
}
}
}
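// Hedged sketch, not part of the original file: parseTimeout above accepts any string that
// Duration can parse, but only finite durations survive; "Inf" parses yet is rejected.
// The object name and sample values are illustrative assumptions.
private object TimeoutParsingSketch extends CBase {
  val fiveSeconds: Timeout = parseTimeout("5 seconds")
  // parseTimeout("Inf") would throw IllegalArgumentException: not a FiniteDuration
}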
trait CUtils extends CBase with Loggable {
logTrace(s"""Detected confing override: $configOverride""")
logTrace(s"""Detected branch: $branch""")
logTrace(s"""Detected user home: $userHome""")
logTrace(s"""Detected confing path: $configPath""")
private def loadResourceConfig(name: String, resource: String): Config = {
try {
logDebug(s"""Loading $name resource config from: "$resource"""")
val config = loadResourceConfig(resource)
logTrace(s"""Content of $name resource config "$resource": ${ config.toString }""")
config
} catch {
case e: Exception =>
val msg = s"""An error occured while loading $name resource config "$resource"!"""
logError(msg, e)
throw new SeException(msg, e)
}
}
private def loadFileConfig(name: String, filename: String): Config = {
val path = configPath / filename
try {
logDebug(s"""Loading $name file config from: "$path"""")
val config = loadFileConfig(path)
logTrace(s"""Content of $name file config "$path": ${ config.toString }""")
config
} catch {
case e: Exception =>
val msg = s"""An error occured while loading $name file config "$path"!"""
logError(msg, e)
throw new SeException(msg, e)
}
}
def loadAkkaReference = loadResourceConfig("reference akka", "selenate-akka.reference.config")
def loadAppReference = loadResourceConfig("reference application", "server.reference.config")
def loadAppUser = loadFileConfig("user application", "server.config")
}
|
tferega/selenate
|
code/Server/src/main/scala/net/selenate/server/CUtils.scala
|
Scala
|
bsd-3-clause
| 3,021 |
/*
* Copyright 2010 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.io
import java.io.{ByteArrayOutputStream, InputStream, OutputStream}
import scala.annotation.tailrec
object StreamIO {
/**
* Copy an InputStream to an OutputStream in chunks of the given
* buffer size (default = 1KB).
*/
@tailrec
final def copy(
inputStream: InputStream,
outputStream: OutputStream,
bufferSize: Int = 1024
) {
val buf = new Array[Byte](bufferSize)
inputStream.read(buf, 0, buf.length) match {
case -1 => ()
case n =>
outputStream.write(buf, 0, n)
copy(inputStream, outputStream, bufferSize)
}
}
/**
* Buffer (fully) the given input stream by creating & copying it to
* a ByteArrayOutputStream.
*/
def buffer(inputStream: InputStream): ByteArrayOutputStream = {
val bos = new java.io.ByteArrayOutputStream
copy(inputStream, bos)
bos
}
}
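// Hedged sketch, not part of the original file: using copy and buffer above on in-memory
// streams. ByteArrayInputStream is imported locally since the file's imports don't include it;
// the payload and buffer size are illustrative.
object StreamIOSketch {
  import java.io.ByteArrayInputStream
  def demo(): Unit = {
    val payload = "hello stream".getBytes("UTF-8")
    // copy: drain an InputStream into an OutputStream in bufferSize-byte chunks
    val copied = new ByteArrayOutputStream
    StreamIO.copy(new ByteArrayInputStream(payload), copied, bufferSize = 4)
    // buffer: convenience wrapper that copies into a fresh ByteArrayOutputStream
    val buffered = StreamIO.buffer(new ByteArrayInputStream(payload))
    assert(copied.toByteArray.sameElements(buffered.toByteArray))
    println(s"copied ${copied.size} bytes")
  }
}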
|
loiane/Potigol
|
src/com/twitter/io/StreamIO.scala
|
Scala
|
gpl-2.0
| 1,474 |
package skinny.controller.feature
import org.scalatra.test.scalatest.ScalatraFlatSpec
import skinny._
import skinny.controller.SkinnyController
class FormParamsFeatureSpec extends ScalatraFlatSpec {
behavior of "FormParamsFeature"
class Controller extends SkinnyController {
def single = formParams.getAs[String]("foo").getOrElse("<empty>")
def multi = formMultiParams.getAs[String]("foo").map(_.mkString(",")).getOrElse("<empty>")
}
object ctrl extends Controller with Routes {
get("/get")(single).as(Symbol("get"))
post("/post")(single).as(Symbol("post"))
get("/multi/get")(multi).as(Symbol("multiGet"))
post("/multi/post")(multi).as(Symbol("multiPost"))
}
addFilter(ctrl, "/*")
"formMultiParams" should "be available" in {
get("/multi/get?foo=bar&foo=baz") {
status should equal(200)
body should equal("")
}
post("/multi/post", "foo" -> "bar", "foo" -> "baz") {
status should equal(200)
body should equal("bar,baz")
}
post("/multi/post?foo=bar&foo=baz&foo=xxx", "foo" -> "xxx", "foo" -> "yyy") {
status should equal(200)
body should equal("xxx,yyy")
}
}
"formParams" should "be available" in {
get("/get", "foo" -> "bar") {
status should equal(200)
body should equal("<empty>")
}
post("/post", "foo" -> "bar") {
status should equal(200)
body should equal("bar")
}
post("/post?foo=bar", "foo" -> "baz") {
status should equal(200)
body should equal("baz")
}
}
}
|
skinny-framework/skinny-framework
|
framework/src/test/scala/skinny/controller/feature/FormParamsFeatureSpec.scala
|
Scala
|
mit
| 1,534 |