code (string, lengths 5–1M) | repo_name (string, lengths 5–109) | path (string, lengths 6–208) | language (string, 1 class) | license (string, 15 classes) | size (int64, 5–1M)
---|---|---|---|---|---
package dotty.tools.scaladoc
package site
import java.nio.file.Path
case class StaticSiteRoot(
rootTemplate: LoadedTemplate,
siteMappings: Map[Path, Path]
):
lazy val reverseSiteMappings = siteMappings.map(_.swap).toMap
lazy val sources = siteMappings.keys.toSet
lazy val dests = reverseSiteMappings.keys.toSet
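// Illustrative sketch (not part of the original source): how the derived lazy vals relate,
// assuming a single hypothetical source/destination mapping and some LoadedTemplate `template`.
//   import java.nio.file.Paths
//   val root = StaticSiteRoot(
//     rootTemplate = template, // a LoadedTemplate, elided here
//     siteMappings = Map(Paths.get("docs/index.md") -> Paths.get("index.html"))
//   )
//   root.reverseSiteMappings(Paths.get("index.html")) // Paths.get("docs/index.md")
//   root.sources                                      // Set(docs/index.md)
//   root.dests                                        // Set(index.html)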
| dotty-staging/dotty | scaladoc/src/dotty/tools/scaladoc/site/StaticSiteRoot.scala | Scala | apache-2.0 | 322 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io._
import java.lang.reflect.Constructor
import java.net.URI
import java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}
import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}
import scala.collection.JavaConverters._
import scala.collection.Map
import scala.collection.generic.Growable
import scala.collection.mutable.HashMap
import scala.language.implicitConversions
import scala.reflect.{classTag, ClassTag}
import scala.util.control.NonFatal
import com.google.common.collect.MapMaker
import org.apache.commons.lang3.SerializationUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}
import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}
import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}
import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.io.CompressionCodec
import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
import org.apache.spark.rdd._
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.{CoarseGrainedSchedulerBackend, StandaloneSchedulerBackend}
import org.apache.spark.scheduler.local.LocalSchedulerBackend
import org.apache.spark.storage._
import org.apache.spark.storage.BlockManagerMessages.TriggerThreadDump
import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}
import org.apache.spark.ui.jobs.JobProgressListener
import org.apache.spark.util._
/**
* Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
* cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster.
*
* Only one SparkContext may be active per JVM. You must `stop()` the active SparkContext before
* creating a new one. This limitation may eventually be removed; see SPARK-2243 for more details.
*
* @param config a Spark Config object describing the application configuration. Any settings in
* this config override the default configs as well as system properties.
*/
class SparkContext(config: SparkConf) extends Logging {
// The call site where this SparkContext was constructed.
private val creationSite: CallSite = Utils.getCallSite()
// If true, log warnings instead of throwing exceptions when multiple SparkContexts are active
private val allowMultipleContexts: Boolean =
config.getBoolean("spark.driver.allowMultipleContexts", false)
// In order to prevent multiple SparkContexts from being active at the same time, mark this
// context as having started construction.
// NOTE: this must be placed at the beginning of the SparkContext constructor.
SparkContext.markPartiallyConstructed(this, allowMultipleContexts)
val startTime = System.currentTimeMillis()
private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false)
private[spark] def assertNotStopped(): Unit = {
if (stopped.get()) {
val activeContext = SparkContext.activeContext.get()
val activeCreationSite =
if (activeContext == null) {
"(No active SparkContext.)"
} else {
activeContext.creationSite.longForm
}
throw new IllegalStateException(
s"""Cannot call methods on a stopped SparkContext.
|This stopped SparkContext was created at:
|
|${creationSite.longForm}
|
|The currently active SparkContext was created at:
|
|$activeCreationSite
""".stripMargin)
}
}
/**
* Create a SparkContext that loads settings from system properties (for instance, when
* launching with ./bin/spark-submit).
*/
def this() = this(new SparkConf())
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI
* @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters
*/
def this(master: String, appName: String, conf: SparkConf) =
this(SparkContext.updatedConf(conf, master, appName))
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
* @param sparkHome Location where Spark is installed on cluster nodes.
* @param jars Collection of JARs to send to the cluster. These can be paths on the local file
* system or HDFS, HTTP, HTTPS, or FTP URLs.
* @param environment Environment variables to set on worker nodes.
*/
def this(
master: String,
appName: String,
sparkHome: String = null,
jars: Seq[String] = Nil,
environment: Map[String, String] = Map()) = {
this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment))
}
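// Usage sketch (illustrative only; master URL, app name, and jar path are hypothetical):
//   val sc = new SparkContext(new SparkConf().setMaster("local[4]").setAppName("MyApp"))
//   // or, equivalently, via the common-properties constructor:
//   val sc2 = new SparkContext("local[4]", "MyApp", jars = Seq("/path/to/app.jar"))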
// NOTE: The below constructors could be consolidated using default arguments. Due to
// Scala bug SI-8479, however, this causes the compile step to fail when generating docs.
// Until we have a good workaround for that bug the constructors remain broken out.
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
*/
private[spark] def this(master: String, appName: String) =
this(master, appName, null, Nil, Map())
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
* @param sparkHome Location where Spark is installed on cluster nodes.
*/
private[spark] def this(master: String, appName: String, sparkHome: String) =
this(master, appName, sparkHome, Nil, Map())
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
* @param sparkHome Location where Spark is installed on cluster nodes.
* @param jars Collection of JARs to send to the cluster. These can be paths on the local file
* system or HDFS, HTTP, HTTPS, or FTP URLs.
*/
private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) =
this(master, appName, sparkHome, jars, Map())
// log out Spark Version in Spark driver log
logInfo(s"Running Spark version $SPARK_VERSION")
warnDeprecatedVersions()
/* ------------------------------------------------------------------------------------- *
| Private variables. These variables keep the internal state of the context, and are |
| not accessible by the outside world. They're mutable since we want to initialize all |
| of them to some neutral value ahead of time, so that calling "stop()" while the |
| constructor is still running is safe. |
* ------------------------------------------------------------------------------------- */
private var _conf: SparkConf = _
private var _eventLogDir: Option[URI] = None
private var _eventLogCodec: Option[String] = None
private var _env: SparkEnv = _
private var _jobProgressListener: JobProgressListener = _
private var _statusTracker: SparkStatusTracker = _
private var _progressBar: Option[ConsoleProgressBar] = None
private var _ui: Option[SparkUI] = None
private var _hadoopConfiguration: Configuration = _
private var _executorMemory: Int = _
private var _schedulerBackend: SchedulerBackend = _
private var _taskScheduler: TaskScheduler = _
private var _heartbeatReceiver: RpcEndpointRef = _
@volatile private var _dagScheduler: DAGScheduler = _
private var _applicationId: String = _
private var _applicationAttemptId: Option[String] = None
private var _eventLogger: Option[EventLoggingListener] = None
private var _executorAllocationManager: Option[ExecutorAllocationManager] = None
private var _cleaner: Option[ContextCleaner] = None
private var _listenerBusStarted: Boolean = false
private var _jars: Seq[String] = _
private var _files: Seq[String] = _
private var _shutdownHookRef: AnyRef = _
/* ------------------------------------------------------------------------------------- *
| Accessors and public fields. These provide access to the internal state of the |
| context. |
* ------------------------------------------------------------------------------------- */
private[spark] def conf: SparkConf = _conf
/**
* Return a copy of this SparkContext's configuration. The configuration ''cannot'' be
* changed at runtime.
*/
def getConf: SparkConf = conf.clone()
def jars: Seq[String] = _jars
def files: Seq[String] = _files
def master: String = _conf.get("spark.master")
def deployMode: String = _conf.getOption("spark.submit.deployMode").getOrElse("client")
def appName: String = _conf.get("spark.app.name")
private[spark] def isEventLogEnabled: Boolean = _conf.getBoolean("spark.eventLog.enabled", false)
private[spark] def eventLogDir: Option[URI] = _eventLogDir
private[spark] def eventLogCodec: Option[String] = _eventLogCodec
def isLocal: Boolean = Utils.isLocalMaster(_conf)
/**
* @return true if context is stopped or in the midst of stopping.
*/
def isStopped: Boolean = stopped.get()
// An asynchronous listener bus for Spark events
private[spark] val listenerBus = new LiveListenerBus(this)
// This function allows components created by SparkEnv to be mocked in unit tests:
private[spark] def createSparkEnv(
conf: SparkConf,
isLocal: Boolean,
listenerBus: LiveListenerBus): SparkEnv = {
SparkEnv.createDriverEnv(conf, isLocal, listenerBus, SparkContext.numDriverCores(master))
}
private[spark] def env: SparkEnv = _env
// Used to store a URL for each static file/jar together with the file's local timestamp
private[spark] val addedFiles = new ConcurrentHashMap[String, Long]().asScala
private[spark] val addedJars = new ConcurrentHashMap[String, Long]().asScala
// Keeps track of all persisted RDDs
private[spark] val persistentRdds = {
val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]()
map.asScala
}
private[spark] def jobProgressListener: JobProgressListener = _jobProgressListener
def statusTracker: SparkStatusTracker = _statusTracker
private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar
private[spark] def ui: Option[SparkUI] = _ui
def uiWebUrl: Option[String] = _ui.map(_.webUrl)
/**
* A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse.
*
* @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you
* plan to set some global configurations for all Hadoop RDDs.
*/
def hadoopConfiguration: Configuration = _hadoopConfiguration
private[spark] def executorMemory: Int = _executorMemory
// Environment variables to pass to our executors.
private[spark] val executorEnvs = HashMap[String, String]()
// Set SPARK_USER for user who is running SparkContext.
val sparkUser = Utils.getCurrentUserName()
private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend
private[spark] def taskScheduler: TaskScheduler = _taskScheduler
private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = {
_taskScheduler = ts
}
private[spark] def dagScheduler: DAGScheduler = _dagScheduler
private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = {
_dagScheduler = ds
}
/**
* A unique identifier for the Spark application.
* Its format depends on the scheduler implementation.
* (e.g. 'local-1433865536131' for a local Spark app,
* 'application_1433865536131_34483' on YARN)
*/
def applicationId: String = _applicationId
def applicationAttemptId: Option[String] = _applicationAttemptId
private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger
private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] =
_executorAllocationManager
private[spark] def cleaner: Option[ContextCleaner] = _cleaner
private[spark] var checkpointDir: Option[String] = None
// Thread Local variable that can be used by users to pass information down the stack
protected[spark] val localProperties = new InheritableThreadLocal[Properties] {
override protected def childValue(parent: Properties): Properties = {
// Note: make a clone such that changes in the parent properties aren't reflected in
// those of the child threads, which has confusing semantics (SPARK-10563).
SerializationUtils.clone(parent)
}
override protected def initialValue(): Properties = new Properties()
}
/* ------------------------------------------------------------------------------------- *
| Initialization. This code initializes the context in a manner that is exception-safe. |
| All internal fields holding state are initialized here, and any error prompts the |
| stop() method to be called. |
* ------------------------------------------------------------------------------------- */
private def warnSparkMem(value: String): String = {
logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " +
"deprecated, please use spark.executor.memory instead.")
value
}
private def warnDeprecatedVersions(): Unit = {
val javaVersion = System.getProperty("java.version").split("[+.\\-]+", 3)
if (scala.util.Properties.releaseVersion.exists(_.startsWith("2.10"))) {
logWarning("Support for Scala 2.10 is deprecated as of Spark 2.1.0")
}
}
/** Control our logLevel. This overrides any user-defined log settings.
* @param logLevel The desired log level as a string.
* Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
*/
def setLogLevel(logLevel: String) {
// let's allow lowercase or mixed case too
val upperCased = logLevel.toUpperCase(Locale.ENGLISH)
require(SparkContext.VALID_LOG_LEVELS.contains(upperCased),
s"Supplied level $logLevel did not match one of:" +
s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}")
Utils.setLogLevel(org.apache.log4j.Level.toLevel(upperCased))
}
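// Usage sketch (illustrative only; `sc` is an active SparkContext):
//   sc.setLogLevel("WARN")  // lowercase or mixed case is accepted; must match a valid log level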
try {
_conf = config.clone()
_conf.validateSettings()
if (!_conf.contains("spark.master")) {
throw new SparkException("A master URL must be set in your configuration")
}
if (!_conf.contains("spark.app.name")) {
throw new SparkException("An application name must be set in your configuration")
}
// log out spark.app.name in the Spark driver logs
logInfo(s"Submitted application: $appName")
// The system property spark.yarn.app.id must be set if user code is run by the AM on a YARN cluster
if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) {
throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " +
"Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.")
}
if (_conf.getBoolean("spark.logConf", false)) {
logInfo("Spark configuration:\\n" + _conf.toDebugString)
}
// Set Spark driver host and port system properties. This explicitly sets the configuration
// instead of relying on the default value of the config constant.
_conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS))
_conf.setIfMissing("spark.driver.port", "0")
_conf.set("spark.executor.id", SparkContext.DRIVER_IDENTIFIER)
_jars = Utils.getUserJars(_conf)
_files = _conf.getOption("spark.files").map(_.split(",")).map(_.filter(_.nonEmpty))
.toSeq.flatten
_eventLogDir =
if (isEventLogEnabled) {
val unresolvedDir = conf.get("spark.eventLog.dir", EventLoggingListener.DEFAULT_LOG_DIR)
.stripSuffix("/")
Some(Utils.resolveURI(unresolvedDir))
} else {
None
}
_eventLogCodec = {
val compress = _conf.getBoolean("spark.eventLog.compress", false)
if (compress && isEventLogEnabled) {
Some(CompressionCodec.getCodecName(_conf)).map(CompressionCodec.getShortName)
} else {
None
}
}
if (master == "yarn" && deployMode == "client") System.setProperty("SPARK_YARN_MODE", "true")
// "_jobProgressListener" should be set up before creating SparkEnv because when creating
// "SparkEnv", some messages will be posted to "listenerBus" and we should not miss them.
_jobProgressListener = new JobProgressListener(_conf)
listenerBus.addListener(jobProgressListener)
// Create the Spark execution environment (cache, map output tracker, etc)
_env = createSparkEnv(_conf, isLocal, listenerBus)
SparkEnv.set(_env)
// If running the REPL, register the repl's output dir with the file server.
_conf.getOption("spark.repl.class.outputDir").foreach { path =>
val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path))
_conf.set("spark.repl.class.uri", replUri)
}
_statusTracker = new SparkStatusTracker(this)
_progressBar =
if (_conf.getBoolean("spark.ui.showConsoleProgress", true) && !log.isInfoEnabled) {
Some(new ConsoleProgressBar(this))
} else {
None
}
_ui =
if (conf.getBoolean("spark.ui.enabled", true)) {
Some(SparkUI.createLiveUI(this, _conf, listenerBus, _jobProgressListener,
_env.securityManager, appName, startTime = startTime))
} else {
// For tests, do not enable the UI
None
}
// Bind the UI before starting the task scheduler to communicate
// the bound port to the cluster manager properly
_ui.foreach(_.bind())
_hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf)
// Add each JAR given through the constructor
if (jars != null) {
jars.foreach(addJar)
}
if (files != null) {
files.foreach(addFile)
}
_executorMemory = _conf.getOption("spark.executor.memory")
.orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY")))
.orElse(Option(System.getenv("SPARK_MEM"))
.map(warnSparkMem))
.map(Utils.memoryStringToMb)
.getOrElse(1024)
// Convert java options to env vars as a work around
// since we can't set env vars directly in sbt.
for { (envKey, propKey) <- Seq(("SPARK_TESTING", "spark.testing"))
value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} {
executorEnvs(envKey) = value
}
Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v =>
executorEnvs("SPARK_PREPEND_CLASSES") = v
}
// The Mesos scheduler backend relies on this environment variable to set executor memory.
// TODO: Set this only in the Mesos scheduler.
executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m"
executorEnvs ++= _conf.getExecutorEnv
executorEnvs("SPARK_USER") = sparkUser
// We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will
// retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640)
_heartbeatReceiver = env.rpcEnv.setupEndpoint(
HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this))
// Create and start the scheduler
val (sched, ts) = SparkContext.createTaskScheduler(this, master, deployMode)
_schedulerBackend = sched
_taskScheduler = ts
_dagScheduler = new DAGScheduler(this)
_heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet)
// Start the TaskScheduler only after the DAGScheduler's constructor has set the DAGScheduler
// reference in the TaskScheduler.
_taskScheduler.start()
_applicationId = _taskScheduler.applicationId()
_applicationAttemptId = taskScheduler.applicationAttemptId()
_conf.set("spark.app.id", _applicationId)
if (_conf.getBoolean("spark.ui.reverseProxy", false)) {
System.setProperty("spark.ui.proxyBase", "/proxy/" + _applicationId)
}
_ui.foreach(_.setAppId(_applicationId))
_env.blockManager.initialize(_applicationId)
// The driver's metrics system needs spark.app.id to be set to the application ID, so it should
// start only after we get the app ID from the task scheduler and set spark.app.id.
_env.metricsSystem.start()
// Attach the driver metrics servlet handler to the web ui after the metrics system is started.
_env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler)))
_eventLogger =
if (isEventLogEnabled) {
val logger =
new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get,
_conf, _hadoopConfiguration)
logger.start()
listenerBus.addListener(logger)
Some(logger)
} else {
None
}
// Optionally scale number of executors dynamically based on workload. Exposed for testing.
val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf)
_executorAllocationManager =
if (dynamicAllocationEnabled) {
schedulerBackend match {
case b: ExecutorAllocationClient =>
Some(new ExecutorAllocationManager(
schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf))
case _ =>
None
}
} else {
None
}
_executorAllocationManager.foreach(_.start())
_cleaner =
if (_conf.getBoolean("spark.cleaner.referenceTracking", true)) {
Some(new ContextCleaner(this))
} else {
None
}
_cleaner.foreach(_.start())
setupAndStartListenerBus()
postEnvironmentUpdate()
postApplicationStart()
// Post init
_taskScheduler.postStartHook()
_env.metricsSystem.registerSource(_dagScheduler.metricsSource)
_env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager))
_executorAllocationManager.foreach { e =>
_env.metricsSystem.registerSource(e.executorAllocationManagerSource)
}
// Make sure the context is stopped if the user forgets about it. This avoids leaving
// unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM
// is killed, though.
logDebug("Adding shutdown hook") // force eager creation of logger
_shutdownHookRef = ShutdownHookManager.addShutdownHook(
ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () =>
logInfo("Invoking stop() from shutdown hook")
stop()
}
} catch {
case NonFatal(e) =>
logError("Error initializing SparkContext.", e)
try {
stop()
} catch {
case NonFatal(inner) =>
logError("Error stopping SparkContext after init error.", inner)
} finally {
throw e
}
}
/**
* Called by the web UI to obtain executor thread dumps. This method may be expensive.
* Logs an error and returns None if we failed to obtain a thread dump, which could occur due
* to an executor being dead or unresponsive or due to network issues while sending the thread
* dump message back to the driver.
*/
private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = {
try {
if (executorId == SparkContext.DRIVER_IDENTIFIER) {
Some(Utils.getThreadDump())
} else {
val endpointRef = env.blockManager.master.getExecutorEndpointRef(executorId).get
Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump))
}
} catch {
case e: Exception =>
logError(s"Exception getting thread dump from executor $executorId", e)
None
}
}
private[spark] def getLocalProperties: Properties = localProperties.get()
private[spark] def setLocalProperties(props: Properties) {
localProperties.set(props)
}
/**
* Set a local property that affects jobs submitted from this thread, such as the Spark fair
* scheduler pool. User-defined properties may also be set here. These properties are propagated
* through to worker tasks and can be accessed there via
* [[org.apache.spark.TaskContext#getLocalProperty]].
*
* These properties are inherited by child threads spawned from this thread. This
* may have unexpected consequences when working with thread pools. The standard Java
* implementation of thread pools has worker threads spawn other worker threads.
* As a result, local properties may propagate unpredictably.
*/
def setLocalProperty(key: String, value: String) {
if (value == null) {
localProperties.get.remove(key)
} else {
localProperties.get.setProperty(key, value)
}
}
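// Usage sketch (illustrative only; the pool name is hypothetical):
//   sc.setLocalProperty("spark.scheduler.pool", "reporting") // jobs from this thread use the pool
//   sc.setLocalProperty("spark.scheduler.pool", null)        // clear it again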
/**
* Get a local property set in this thread, or null if it is missing. See
* `org.apache.spark.SparkContext.setLocalProperty`.
*/
def getLocalProperty(key: String): String =
Option(localProperties.get).map(_.getProperty(key)).orNull
/** Set a human readable description of the current job. */
def setJobDescription(value: String) {
setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value)
}
/**
* Assigns a group ID to all the jobs started by this thread until the group ID is set to a
* different value or cleared.
*
* Often, a unit of execution in an application consists of multiple Spark actions or jobs.
* Application programmers can use this method to group all those jobs together and give a
* group description. Once set, the Spark web UI will associate such jobs with this group.
*
* The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all
* running jobs in this group. For example,
* {{{
* // In the main thread:
* sc.setJobGroup("some_job_to_cancel", "some job description")
* sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count()
*
* // In a separate thread:
* sc.cancelJobGroup("some_job_to_cancel")
* }}}
*
* @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()`
* being called on the job's executor threads. This is useful to help ensure that the tasks
* are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS
* may respond to Thread.interrupt() by marking nodes as dead.
*/
def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false) {
setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description)
setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId)
// Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids
// changing several public APIs and allows Spark cancellations outside of the cancelJobGroup
// APIs to also take advantage of this property (e.g., internal job failures or canceling from
// JobProgressTab UI) on a per-job basis.
setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString)
}
/** Clear the current thread's job group ID and its description. */
def clearJobGroup() {
setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null)
setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null)
setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null)
}
/**
* Execute a block of code in a scope such that all new RDDs created in this body will
* be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}.
*
* @note Return statements are NOT allowed in the given body.
*/
private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body)
// Methods for creating RDDs
/** Distribute a local Scala collection to form an RDD.
*
* @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call
* to parallelize and before the first action on the RDD, the resultant RDD will reflect the
* modified collection. Pass a copy of the argument to avoid this.
* @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an
* RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions.
* @param seq Scala collection to distribute
* @param numSlices number of partitions to divide the collection into
* @return RDD representing distributed collection
*/
def parallelize[T: ClassTag](
seq: Seq[T],
numSlices: Int = defaultParallelism): RDD[T] = withScope {
assertNotStopped()
new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]())
}
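// Usage sketch (illustrative only; `sc` is an active SparkContext):
//   val nums = sc.parallelize(1 to 1000, numSlices = 4)
//   nums.sum()  // 500500.0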
/**
* Creates a new RDD[Long] containing elements from `start` to `end` (exclusive), increased by
* `step` for every element.
*
* @note if we need to cache this RDD, we should make sure each partition does not exceed the limit.
*
* @param start the start value.
* @param end the end value.
* @param step the incremental step
* @param numSlices number of partitions to divide the collection into
* @return RDD representing distributed range
*/
def range(
start: Long,
end: Long,
step: Long = 1,
numSlices: Int = defaultParallelism): RDD[Long] = withScope {
assertNotStopped()
// when step is 0, range will run infinitely
require(step != 0, "step cannot be 0")
val numElements: BigInt = {
val safeStart = BigInt(start)
val safeEnd = BigInt(end)
if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) {
(safeEnd - safeStart) / step
} else {
// the remainder has the same sign as the range, so add 1 more element
(safeEnd - safeStart) / step + 1
}
}
parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) =>
val partitionStart = (i * numElements) / numSlices * step + start
val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start
def getSafeMargin(bi: BigInt): Long =
if (bi.isValidLong) {
bi.toLong
} else if (bi > 0) {
Long.MaxValue
} else {
Long.MinValue
}
val safePartitionStart = getSafeMargin(partitionStart)
val safePartitionEnd = getSafeMargin(partitionEnd)
new Iterator[Long] {
private[this] var number: Long = safePartitionStart
private[this] var overflow: Boolean = false
override def hasNext =
if (!overflow) {
if (step > 0) {
number < safePartitionEnd
} else {
number > safePartitionEnd
}
} else false
override def next() = {
val ret = number
number += step
if (number < ret ^ step < 0) {
// Long.MaxValue + Long.MaxValue < Long.MaxValue and
// Long.MinValue + Long.MinValue > Long.MinValue, so if adding the step moved the value
// backwards, we can be sure that we have overflowed.
overflow = true
}
ret
}
}
}
}
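// Usage sketch (illustrative only): even numbers below 10, split across 2 partitions.
//   sc.range(0L, 10L, step = 2, numSlices = 2).collect()  // Array(0, 2, 4, 6, 8)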
/** Distribute a local Scala collection to form an RDD.
*
* This method is identical to `parallelize`.
* @param seq Scala collection to distribute
* @param numSlices number of partitions to divide the collection into
* @return RDD representing distributed collection
*/
def makeRDD[T: ClassTag](
seq: Seq[T],
numSlices: Int = defaultParallelism): RDD[T] = withScope {
parallelize(seq, numSlices)
}
/**
* Distribute a local Scala collection to form an RDD, with one or more
* location preferences (hostnames of Spark nodes) for each object.
* Create a new partition for each collection item.
* @param seq list of tuples of data and location preferences (hostnames of Spark nodes)
* @return RDD representing data partitioned according to location preferences
*/
def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope {
assertNotStopped()
val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap
new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs)
}
/**
* Read a text file from HDFS, a local file system (available on all nodes), or any
* Hadoop-supported file system URI, and return it as an RDD of Strings.
* @param path path to the text file on a supported file system
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of lines of the text file
*/
def textFile(
path: String,
minPartitions: Int = defaultMinPartitions): RDD[String] = withScope {
assertNotStopped()
hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text],
minPartitions).map(pair => pair._2.toString).setName(path)
}
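// Usage sketch (illustrative only; the path is hypothetical):
//   val lines = sc.textFile("hdfs://namenode/data/events.log", minPartitions = 8)
//   lines.filter(_.contains("ERROR")).count()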
/**
* Read a directory of text files from HDFS, a local file system (available on all nodes), or any
* Hadoop-supported file system URI. Each file is read as a single record and returned in a
* key-value pair, where the key is the path of each file, the value is the content of each file.
*
* <p> For example, if you have the following files:
* {{{
* hdfs://a-hdfs-path/part-00000
* hdfs://a-hdfs-path/part-00001
* ...
* hdfs://a-hdfs-path/part-nnnnn
* }}}
*
* Do `val rdd = sparkContext.wholeTextFile("hdfs://a-hdfs-path")`,
*
* <p> then `rdd` contains
* {{{
* (a-hdfs-path/part-00000, its content)
* (a-hdfs-path/part-00001, its content)
* ...
* (a-hdfs-path/part-nnnnn, its content)
* }}}
*
* @note Small files are preferred; large files are also allowable, but may cause bad performance.
* @note On some filesystems, `.../path/*` can be a more efficient way to read all files
* in a directory rather than `.../path/` or `.../path`
* @note Partitioning is determined by data locality. This may result in too few partitions
* by default.
*
* @param path Directory to the input data files, the path can be comma separated paths as the
* list of inputs.
* @param minPartitions A suggestion value of the minimal splitting number for input data.
* @return RDD representing tuples of file path and the corresponding file content
*/
def wholeTextFiles(
path: String,
minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope {
assertNotStopped()
val job = NewHadoopJob.getInstance(hadoopConfiguration)
// Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking
// comma separated files as input. (see SPARK-7155)
NewFileInputFormat.setInputPaths(job, path)
val updateConf = job.getConfiguration
new WholeTextFileRDD(
this,
classOf[WholeTextFileInputFormat],
classOf[Text],
classOf[Text],
updateConf,
minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path)
}
/**
* Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file
* (useful for binary data)
*
* For example, if you have the following files:
* {{{
* hdfs://a-hdfs-path/part-00000
* hdfs://a-hdfs-path/part-00001
* ...
* hdfs://a-hdfs-path/part-nnnnn
* }}}
*
* Do
* `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`,
*
* then `rdd` contains
* {{{
* (a-hdfs-path/part-00000, its content)
* (a-hdfs-path/part-00001, its content)
* ...
* (a-hdfs-path/part-nnnnn, its content)
* }}}
*
* @note Small files are preferred; very large files may cause bad performance.
* @note On some filesystems, `.../path/*` can be a more efficient way to read all files
* in a directory rather than `.../path/` or `.../path`
* @note Partitioning is determined by data locality. This may result in too few partitions
* by default.
*
* @param path Directory to the input data files, the path can be comma separated paths as the
* list of inputs.
* @param minPartitions A suggestion value of the minimal splitting number for input data.
* @return RDD representing tuples of file path and corresponding file content
*/
def binaryFiles(
path: String,
minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope {
assertNotStopped()
val job = NewHadoopJob.getInstance(hadoopConfiguration)
// Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking
// comma separated files as input. (see SPARK-7155)
NewFileInputFormat.setInputPaths(job, path)
val updateConf = job.getConfiguration
new BinaryFileRDD(
this,
classOf[StreamInputFormat],
classOf[String],
classOf[PortableDataStream],
updateConf,
minPartitions).setName(path)
}
/**
* Load data from a flat binary file, assuming the length of each record is constant.
*
* @note We ensure that the byte array for each record in the resulting RDD
* has the provided record length.
*
* @param path Directory to the input data files, the path can be comma separated paths as the
* list of inputs.
* @param recordLength The length at which to split the records
* @param conf Configuration for setting up the dataset.
*
* @return An RDD of data with values, represented as byte arrays
*/
def binaryRecords(
path: String,
recordLength: Int,
conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope {
assertNotStopped()
conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength)
val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path,
classOf[FixedLengthBinaryInputFormat],
classOf[LongWritable],
classOf[BytesWritable],
conf = conf)
br.map { case (k, v) =>
val bytes = v.copyBytes()
assert(bytes.length == recordLength, "Byte array does not have correct length")
bytes
}
}
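// Usage sketch (illustrative only; path and record length are hypothetical):
//   val records = sc.binaryRecords("hdfs://namenode/data/fixed.bin", recordLength = 128)
//   records.map(bytes => bytes.length).distinct().collect()  // Array(128)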
/**
* Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other
* necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable),
* using the older MapReduce API (`org.apache.hadoop.mapred`).
*
* @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast.
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
* sure you won't modify the conf. A safe approach is always creating a new conf for
* a new RDD.
* @param inputFormatClass storage format of the data to be read
* @param keyClass `Class` of the key associated with the `inputFormatClass` parameter
* @param valueClass `Class` of the value associated with the `inputFormatClass` parameter
* @param minPartitions Minimum number of Hadoop Splits to generate.
* @return RDD of tuples of key and corresponding value
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
*/
def hadoopRDD[K, V](
conf: JobConf,
inputFormatClass: Class[_ <: InputFormat[K, V]],
keyClass: Class[K],
valueClass: Class[V],
minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope {
assertNotStopped()
// This is a hack to enforce loading hdfs-site.xml.
// See SPARK-11227 for details.
FileSystem.getLocal(conf)
// Add necessary security credentials to the JobConf before broadcasting it.
SparkHadoopUtil.get.addCredentials(conf)
new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions)
}
/** Get an RDD for a Hadoop file with an arbitrary InputFormat
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param inputFormatClass storage format of the data to be read
* @param keyClass `Class` of the key associated with the `inputFormatClass` parameter
* @param valueClass `Class` of the value associated with the `inputFormatClass` parameter
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
*/
def hadoopFile[K, V](
path: String,
inputFormatClass: Class[_ <: InputFormat[K, V]],
keyClass: Class[K],
valueClass: Class[V],
minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope {
assertNotStopped()
// This is a hack to enforce loading hdfs-site.xml.
// See SPARK-11227 for details.
FileSystem.getLocal(hadoopConfiguration)
// A Hadoop configuration can be about 10 KB, which is pretty big, so broadcast it.
val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration))
val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path)
new HadoopRDD(
this,
confBroadcast,
Some(setInputPathsFunc),
inputFormatClass,
keyClass,
valueClass,
minPartitions).setName(path)
}
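// Usage sketch (illustrative only; the path is hypothetical): copy out of the reused Writable
// objects before caching, as recommended in the note above.
//   val raw = sc.hadoopFile("hdfs://namenode/data/input", classOf[TextInputFormat],
//     classOf[LongWritable], classOf[Text])
//   val safe = raw.map { case (offset, line) => (offset.get(), line.toString) }
//   safe.cache()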
/**
* Smarter version of hadoopFile() that uses class tags to figure out the classes of keys,
* values and the InputFormat so that users don't need to pass them directly. Instead, callers
* can just write, for example,
* {{{
* val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions)
* }}}
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
*/
def hadoopFile[K, V, F <: InputFormat[K, V]]
(path: String, minPartitions: Int)
(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
hadoopFile(path,
fm.runtimeClass.asInstanceOf[Class[F]],
km.runtimeClass.asInstanceOf[Class[K]],
vm.runtimeClass.asInstanceOf[Class[V]],
minPartitions)
}
/**
* Smarter version of hadoopFile() that uses class tags to figure out the classes of keys,
* values and the InputFormat so that users don't need to pass them directly. Instead, callers
* can just write, for example,
* {{{
* val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path)
* }}}
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths as
* a list of inputs
* @return RDD of tuples of key and corresponding value
*/
def hadoopFile[K, V, F <: InputFormat[K, V]](path: String)
(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
hadoopFile[K, V, F](path, defaultMinPartitions)
}
/**
* Smarter version of `newAPIHadoopFile` that uses class tags to figure out the classes of keys,
* values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that users
* don't need to pass them directly. Instead, callers can just write, for example:
* {{{
* val file = sparkContext.newAPIHadoopFile[LongWritable, Text, TextInputFormat](path)
* }}}
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @return RDD of tuples of key and corresponding value
*/
def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]
(path: String)
(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
newAPIHadoopFile(
path,
fm.runtimeClass.asInstanceOf[Class[F]],
km.runtimeClass.asInstanceOf[Class[K]],
vm.runtimeClass.asInstanceOf[Class[V]])
}
/**
* Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
* and extra configuration options to pass to the input format.
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param fClass storage format of the data to be read
* @param kClass `Class` of the key associated with the `fClass` parameter
* @param vClass `Class` of the value associated with the `fClass` parameter
* @param conf Hadoop configuration
* @return RDD of tuples of key and corresponding value
*/
def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]](
path: String,
fClass: Class[F],
kClass: Class[K],
vClass: Class[V],
conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope {
assertNotStopped()
// This is a hack to enforce loading hdfs-site.xml.
// See SPARK-11227 for details.
FileSystem.getLocal(hadoopConfiguration)
// The call to NewHadoopJob automatically adds security credentials to conf,
// so we don't need to explicitly add them ourselves
val job = NewHadoopJob.getInstance(conf)
// Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking
// comma separated files as input. (see SPARK-7155)
NewFileInputFormat.setInputPaths(job, path)
val updatedConf = job.getConfiguration
new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path)
}
/**
* Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
* and extra configuration options to pass to the input format.
*
* @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast.
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
* sure you won't modify the conf. A safe approach is always creating a new conf for
* a new RDD.
* @param fClass storage format of the data to be read
* @param kClass `Class` of the key associated with the `fClass` parameter
* @param vClass `Class` of the value associated with the `fClass` parameter
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
*/
def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]](
conf: Configuration = hadoopConfiguration,
fClass: Class[F],
kClass: Class[K],
vClass: Class[V]): RDD[(K, V)] = withScope {
assertNotStopped()
// This is a hack to enforce loading hdfs-site.xml.
// See SPARK-11227 for details.
FileSystem.getLocal(conf)
// Add necessary security credentials to the JobConf. Required to access secure HDFS.
val jconf = new JobConf(conf)
SparkHadoopUtil.get.addCredentials(jconf)
new NewHadoopRDD(this, fClass, kClass, vClass, jconf)
}
/**
* Get an RDD for a Hadoop SequenceFile with given key and value types.
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param keyClass `Class` of the key associated with `SequenceFileInputFormat`
* @param valueClass `Class` of the value associated with `SequenceFileInputFormat`
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
*/
def sequenceFile[K, V](path: String,
keyClass: Class[K],
valueClass: Class[V],
minPartitions: Int
): RDD[(K, V)] = withScope {
assertNotStopped()
val inputFormatClass = classOf[SequenceFileInputFormat[K, V]]
hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions)
}
/**
* Get an RDD for a Hadoop SequenceFile with given key and value types.
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param keyClass `Class` of the key associated with `SequenceFileInputFormat`
* @param valueClass `Class` of the value associated with `SequenceFileInputFormat`
* @return RDD of tuples of key and corresponding value
*/
def sequenceFile[K, V](
path: String,
keyClass: Class[K],
valueClass: Class[V]): RDD[(K, V)] = withScope {
assertNotStopped()
sequenceFile(path, keyClass, valueClass, defaultMinPartitions)
}
/**
* Version of sequenceFile() for types implicitly convertible to Writables through a
* WritableConverter. For example, to access a SequenceFile where the keys are Text and the
* values are IntWritable, you could simply write
* {{{
* sparkContext.sequenceFile[String, Int](path, ...)
* }}}
*
* WritableConverters are provided in a somewhat strange way (by an implicit function) to support
* both subclasses of Writable and types for which we define a converter (e.g. Int to
* IntWritable). The most natural thing would've been to have implicit objects for the
* converters, but then we couldn't have an object for every subclass of Writable (you can't
* have a parameterized singleton object). We use functions instead to create a new converter
* for the appropriate type. In addition, we pass the converter a ClassTag of its type to
* allow it to figure out the Writable class to use in the subclass case.
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
*/
def sequenceFile[K, V]
(path: String, minPartitions: Int = defaultMinPartitions)
(implicit km: ClassTag[K], vm: ClassTag[V],
kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = {
withScope {
assertNotStopped()
val kc = clean(kcf)()
val vc = clean(vcf)()
val format = classOf[SequenceFileInputFormat[Writable, Writable]]
val writables = hadoopFile(path, format,
kc.writableClass(km).asInstanceOf[Class[Writable]],
vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions)
writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) }
}
}
/**
* Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and
* BytesWritable values that contain a serialized partition. This is still an experimental
* storage format and may not be supported exactly as is in future Spark releases. It will also
* be pretty slow if you use the default serializer (Java serialization),
* though the nice thing about it is that there's very little effort required to save arbitrary
* objects.
*
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD representing deserialized data from the file(s)
*/
def objectFile[T: ClassTag](
path: String,
minPartitions: Int = defaultMinPartitions): RDD[T] = withScope {
assertNotStopped()
sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions)
.flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader))
}
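// Usage sketch (illustrative only; the path is hypothetical): round-trip through object files.
//   sc.parallelize(Seq(1, 2, 3)).saveAsObjectFile("hdfs://namenode/tmp/ints")
//   sc.objectFile[Int]("hdfs://namenode/tmp/ints").collect()  // Array(1, 2, 3) in some order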
protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope {
new ReliableCheckpointRDD[T](this, path)
}
/** Build the union of a list of RDDs. */
def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope {
val partitioners = rdds.flatMap(_.partitioner).toSet
if (rdds.forall(_.partitioner.isDefined) && partitioners.size == 1) {
new PartitionerAwareUnionRDD(this, rdds)
} else {
new UnionRDD(this, rdds)
}
}
/** Build the union of a list of RDDs passed as variable-length arguments. */
def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope {
union(Seq(first) ++ rest)
}
/** Get an RDD that has no partitions or elements. */
def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this)
// Methods for creating shared variables
/**
* Create an [[org.apache.spark.Accumulator]] variable of a given type, which tasks can "add"
* values to using the `+=` method. Only the driver can access the accumulator's `value`.
*/
@deprecated("use AccumulatorV2", "2.0.0")
def accumulator[T](initialValue: T)(implicit param: AccumulatorParam[T]): Accumulator[T] = {
val acc = new Accumulator(initialValue, param)
cleaner.foreach(_.registerAccumulatorForCleanup(acc.newAcc))
acc
}
/**
* Create an [[org.apache.spark.Accumulator]] variable of a given type, with a name for display
* in the Spark UI. Tasks can "add" values to the accumulator using the `+=` method. Only the
* driver can access the accumulator's `value`.
*/
@deprecated("use AccumulatorV2", "2.0.0")
def accumulator[T](initialValue: T, name: String)(implicit param: AccumulatorParam[T])
: Accumulator[T] = {
val acc = new Accumulator(initialValue, param, Some(name))
cleaner.foreach(_.registerAccumulatorForCleanup(acc.newAcc))
acc
}
/**
* Create an [[org.apache.spark.Accumulable]] shared variable, to which tasks can add values
* with `+=`. Only the driver can access the accumulable's `value`.
* @tparam R accumulator result type
* @tparam T type that can be added to the accumulator
*/
@deprecated("use AccumulatorV2", "2.0.0")
def accumulable[R, T](initialValue: R)(implicit param: AccumulableParam[R, T])
: Accumulable[R, T] = {
val acc = new Accumulable(initialValue, param)
cleaner.foreach(_.registerAccumulatorForCleanup(acc.newAcc))
acc
}
/**
* Create an [[org.apache.spark.Accumulable]] shared variable, with a name for display in the
* Spark UI. Tasks can add values to the accumulable using the `+=` operator. Only the driver can
* access the accumulable's `value`.
* @tparam R accumulator result type
* @tparam T type that can be added to the accumulator
*/
@deprecated("use AccumulatorV2", "2.0.0")
def accumulable[R, T](initialValue: R, name: String)(implicit param: AccumulableParam[R, T])
: Accumulable[R, T] = {
val acc = new Accumulable(initialValue, param, Some(name))
cleaner.foreach(_.registerAccumulatorForCleanup(acc.newAcc))
acc
}
/**
* Create an accumulator from a "mutable collection" type.
*
* Growable and TraversableOnce are the standard APIs that guarantee += and ++=, implemented by
* standard mutable collections. So you can use this with mutable Map, Set, etc.
*/
@deprecated("use AccumulatorV2", "2.0.0")
def accumulableCollection[R <% Growable[T] with TraversableOnce[T] with Serializable: ClassTag, T]
(initialValue: R): Accumulable[R, T] = {
val param = new GrowableAccumulableParam[R, T]
val acc = new Accumulable(initialValue, param)
cleaner.foreach(_.registerAccumulatorForCleanup(acc.newAcc))
acc
}
/**
* Register the given accumulator.
*
* @note Accumulators must be registered before use, or an exception will be thrown.
*/
def register(acc: AccumulatorV2[_, _]): Unit = {
acc.register(this)
}
/**
* Register the given accumulator with given name.
*
* @note Accumulators must be registered before use, or an exception will be thrown.
*/
def register(acc: AccumulatorV2[_, _], name: String): Unit = {
acc.register(this, name = Some(name))
}
/**
* Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`.
*/
def longAccumulator: LongAccumulator = {
val acc = new LongAccumulator
register(acc)
acc
}
/**
* Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`.
*/
def longAccumulator(name: String): LongAccumulator = {
val acc = new LongAccumulator
register(acc, name)
acc
}
/**
* Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`.
*/
def doubleAccumulator: DoubleAccumulator = {
val acc = new DoubleAccumulator
register(acc)
acc
}
/**
* Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`.
*/
def doubleAccumulator(name: String): DoubleAccumulator = {
val acc = new DoubleAccumulator
register(acc, name)
acc
}
/**
   * Create and register a `CollectionAccumulator`, which starts with an empty list and
   * accumulates inputs by adding them to the list.
*/
def collectionAccumulator[T]: CollectionAccumulator[T] = {
val acc = new CollectionAccumulator[T]
register(acc)
acc
}
/**
   * Create and register a `CollectionAccumulator`, which starts with an empty list and
   * accumulates inputs by adding them to the list.
*/
def collectionAccumulator[T](name: String): CollectionAccumulator[T] = {
val acc = new CollectionAccumulator[T]
register(acc, name)
acc
}
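  // Hedged usage sketch (added for illustration; not part of the original source): how a driver
  // program might use the AccumulatorV2 helpers above. The accumulator name "parseErrors" and the
  // sample RDD are assumptions made only for this example.
  //
  //   val parseErrors = sc.longAccumulator("parseErrors")
  //   sc.parallelize(Seq("1", "2", "oops", "4")).foreach { s =>
  //     if (scala.util.Try(s.toInt).isFailure) parseErrors.add(1)  // tasks may only add
  //   }
  //   println(parseErrors.value)  // only the driver reads the merged value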
/**
* Broadcast a read-only variable to the cluster, returning a
* [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions.
   * The variable will be sent to each executor only once.
*
* @param value value to broadcast to the Spark nodes
* @return `Broadcast` object, a read-only variable cached on each machine
*/
def broadcast[T: ClassTag](value: T): Broadcast[T] = {
assertNotStopped()
require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass),
"Can not directly broadcast RDDs; instead, call collect() and broadcast the result.")
val bc = env.broadcastManager.newBroadcast[T](value, isLocal)
val callSite = getCallSite
logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm)
cleaner.foreach(_.registerBroadcastForCleanup(bc))
bc
}
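  // Hedged usage sketch (illustrative only): broadcasting a small lookup table so that each
  // executor fetches it once instead of shipping it with every task. The map contents and the
  // RDD of codes are assumptions made for this example.
  //
  //   val countryNames = sc.broadcast(Map("SE" -> "Sweden", "DE" -> "Germany"))
  //   val labelled = sc.parallelize(Seq("SE", "DE", "SE"))
  //     .map(code => countryNames.value.getOrElse(code, "unknown"))
  //   labelled.collect()  // Array("Sweden", "Germany", "Sweden")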
/**
* Add a file to be downloaded with this Spark job on every node.
*
* @param path can be either a local file, a file in HDFS (or other Hadoop-supported
* filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
* use `SparkFiles.get(fileName)` to find its download location.
*/
def addFile(path: String): Unit = {
addFile(path, false)
}
/**
* Returns a list of file paths that are added to resources.
*/
def listFiles(): Seq[String] = addedFiles.keySet.toSeq
/**
* Add a file to be downloaded with this Spark job on every node.
*
* @param path can be either a local file, a file in HDFS (or other Hadoop-supported
* filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
* use `SparkFiles.get(fileName)` to find its download location.
* @param recursive if true, a directory can be given in `path`. Currently directories are
* only supported for Hadoop-supported filesystems.
*/
def addFile(path: String, recursive: Boolean): Unit = {
val uri = new Path(path).toUri
val schemeCorrectedPath = uri.getScheme match {
case null | "local" => new File(path).getCanonicalFile.toURI.toString
case _ => path
}
val hadoopPath = new Path(schemeCorrectedPath)
val scheme = new URI(schemeCorrectedPath).getScheme
if (!Array("http", "https", "ftp").contains(scheme)) {
val fs = hadoopPath.getFileSystem(hadoopConfiguration)
val isDir = fs.getFileStatus(hadoopPath).isDirectory
if (!isLocal && scheme == "file" && isDir) {
        throw new SparkException("addFile does not support local directories when not running " +
          "in local mode.")
}
if (!recursive && isDir) {
throw new SparkException(s"Added file $hadoopPath is a directory and recursive is not " +
"turned on.")
}
} else {
// SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies
Utils.validateURL(uri)
}
val key = if (!isLocal && scheme == "file") {
env.rpcEnv.fileServer.addFile(new File(uri.getPath))
} else {
schemeCorrectedPath
}
val timestamp = System.currentTimeMillis
if (addedFiles.putIfAbsent(key, timestamp).isEmpty) {
logInfo(s"Added file $path at $key with timestamp $timestamp")
// Fetch the file locally so that closures which are run on the driver can still use the
// SparkFiles API to access files.
Utils.fetchFile(uri.toString, new File(SparkFiles.getRootDirectory()), conf,
env.securityManager, hadoopConfiguration, timestamp, useCache = false)
postEnvironmentUpdate()
}
}
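  // Hedged usage sketch (illustrative only): distributing a side file and reading it from tasks
  // through SparkFiles. The path "/tmp/lookup.csv" is an assumption made for this example.
  //
  //   sc.addFile("/tmp/lookup.csv")
  //   sc.parallelize(1 to 4).foreach { _ =>
  //     val localPath = SparkFiles.get("lookup.csv")  // per-node download location
  //     // open localPath with ordinary java.io APIs here
  //   }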
/**
* :: DeveloperApi ::
* Register a listener to receive up-calls from events that happen during execution.
*/
@DeveloperApi
def addSparkListener(listener: SparkListenerInterface) {
listenerBus.addListener(listener)
}
/**
* :: DeveloperApi ::
* Deregister the listener from Spark's listener bus.
*/
@DeveloperApi
def removeSparkListener(listener: SparkListenerInterface): Unit = {
listenerBus.removeListener(listener)
}
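  // Hedged usage sketch (illustrative only): a minimal listener registered programmatically with
  // addSparkListener. Only onJobEnd is overridden; the class name is an assumption.
  //
  //   import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd}
  //
  //   class JobEndLogger extends SparkListener {
  //     override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit =
  //       println(s"Job ${jobEnd.jobId} finished with ${jobEnd.jobResult}")
  //   }
  //   sc.addSparkListener(new JobEndLogger)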
private[spark] def getExecutorIds(): Seq[String] = {
schedulerBackend match {
case b: CoarseGrainedSchedulerBackend =>
b.getExecutorIds()
case _ =>
        logWarning("Getting executor IDs is only supported in coarse-grained mode")
Nil
}
}
/**
* Update the cluster manager on our scheduling needs. Three bits of information are included
* to help it make decisions.
* @param numExecutors The total number of executors we'd like to have. The cluster manager
* shouldn't kill any running executor to reach this number, but,
* if all existing executors were to die, this is the number of executors
* we'd want to be allocated.
   * @param localityAwareTasks The number of tasks in all active stages that have locality
   *                           preferences. This includes running, pending, and completed tasks.
   * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages
   *                             that would like to run on that host.
   *                             This includes running, pending, and completed tasks.
* @return whether the request is acknowledged by the cluster manager.
*/
@DeveloperApi
def requestTotalExecutors(
numExecutors: Int,
localityAwareTasks: Int,
hostToLocalTaskCount: scala.collection.immutable.Map[String, Int]
): Boolean = {
schedulerBackend match {
case b: CoarseGrainedSchedulerBackend =>
b.requestTotalExecutors(numExecutors, localityAwareTasks, hostToLocalTaskCount)
case _ =>
logWarning("Requesting executors is only supported in coarse-grained mode")
false
}
}
/**
* :: DeveloperApi ::
* Request an additional number of executors from the cluster manager.
* @return whether the request is received.
*/
@DeveloperApi
def requestExecutors(numAdditionalExecutors: Int): Boolean = {
schedulerBackend match {
case b: CoarseGrainedSchedulerBackend =>
b.requestExecutors(numAdditionalExecutors)
case _ =>
logWarning("Requesting executors is only supported in coarse-grained mode")
false
}
}
/**
* :: DeveloperApi ::
* Request that the cluster manager kill the specified executors.
*
* @note This is an indication to the cluster manager that the application wishes to adjust
* its resource usage downwards. If the application wishes to replace the executors it kills
* through this method with new ones, it should follow up explicitly with a call to
   * `SparkContext#requestExecutors`.
*
* @return whether the request is received.
*/
@DeveloperApi
def killExecutors(executorIds: Seq[String]): Boolean = {
schedulerBackend match {
case b: CoarseGrainedSchedulerBackend =>
b.killExecutors(executorIds, replace = false, force = true).nonEmpty
case _ =>
logWarning("Killing executors is only supported in coarse-grained mode")
false
}
}
/**
* :: DeveloperApi ::
* Request that the cluster manager kill the specified executor.
*
* @note This is an indication to the cluster manager that the application wishes to adjust
* its resource usage downwards. If the application wishes to replace the executor it kills
* through this method with a new one, it should follow up explicitly with a call to
   * `SparkContext#requestExecutors`.
*
* @return whether the request is received.
*/
@DeveloperApi
def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId))
/**
* Request that the cluster manager kill the specified executor without adjusting the
* application resource requirements.
*
* The effect is that a new executor will be launched in place of the one killed by
* this request. This assumes the cluster manager will automatically and eventually
* fulfill all missing application resource requests.
*
   * @note The replacement is by no means guaranteed; another application on the same cluster
   * can steal the window of opportunity and acquire this application's resources in the
   * meantime.
*
* @return whether the request is received.
*/
private[spark] def killAndReplaceExecutor(executorId: String): Boolean = {
schedulerBackend match {
case b: CoarseGrainedSchedulerBackend =>
b.killExecutors(Seq(executorId), replace = true, force = true).nonEmpty
case _ =>
logWarning("Killing executors is only supported in coarse-grained mode")
false
}
}
/** The version of Spark on which this application is running. */
def version: String = SPARK_VERSION
/**
   * Return a map from each slave (host:port) to its maximum memory available for caching and
   * the memory currently remaining for caching.
*/
def getExecutorMemoryStatus: Map[String, (Long, Long)] = {
assertNotStopped()
env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) =>
(blockManagerId.host + ":" + blockManagerId.port, mem)
}
}
/**
* :: DeveloperApi ::
   * Return information about what RDDs are cached, whether they are in memory or on disk,
   * how much space they take, etc.
*/
@DeveloperApi
def getRDDStorageInfo: Array[RDDInfo] = {
getRDDStorageInfo(_ => true)
}
private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = {
assertNotStopped()
val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray
StorageUtils.updateRddInfo(rddInfos, getExecutorStorageStatus)
rddInfos.filter(_.isCached)
}
/**
   * Returns an immutable map of RDDs that have marked themselves as persistent via a
   * cache() call.
*
* @note This does not necessarily mean the caching or computation was successful.
*/
def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap
/**
* :: DeveloperApi ::
* Return information about blocks stored in all of the slaves
*/
@DeveloperApi
def getExecutorStorageStatus: Array[StorageStatus] = {
assertNotStopped()
env.blockManager.master.getStorageStatus
}
/**
* :: DeveloperApi ::
* Return pools for fair scheduler
*/
@DeveloperApi
def getAllPools: Seq[Schedulable] = {
assertNotStopped()
// TODO(xiajunluan): We should take nested pools into account
taskScheduler.rootPool.schedulableQueue.asScala.toSeq
}
/**
* :: DeveloperApi ::
* Return the pool associated with the given name, if one exists
*/
@DeveloperApi
def getPoolForName(pool: String): Option[Schedulable] = {
assertNotStopped()
Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool))
}
/**
* Return current scheduling mode
*/
def getSchedulingMode: SchedulingMode.SchedulingMode = {
assertNotStopped()
taskScheduler.schedulingMode
}
/**
* Gets the locality information associated with the partition in a particular rdd
* @param rdd of interest
* @param partition to be looked up for locality
* @return list of preferred locations for the partition
*/
  private[spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = {
dagScheduler.getPreferredLocs(rdd, partition)
}
/**
* Register an RDD to be persisted in memory and/or disk storage
*/
private[spark] def persistRDD(rdd: RDD[_]) {
persistentRdds(rdd.id) = rdd
}
/**
* Unpersist an RDD from memory and/or disk storage
*/
private[spark] def unpersistRDD(rddId: Int, blocking: Boolean = true) {
env.blockManager.master.removeRdd(rddId, blocking)
persistentRdds.remove(rddId)
listenerBus.post(SparkListenerUnpersistRDD(rddId))
}
/**
* Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future.
* @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems),
* an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node.
*/
def addJar(path: String) {
if (path == null) {
logWarning("null specified as parameter to addJar")
} else {
var key = ""
      if (path.contains("\\")) {
// For local paths with backslashes on Windows, URI throws an exception
key = env.rpcEnv.fileServer.addJar(new File(path))
} else {
val uri = new URI(path)
// SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies
Utils.validateURL(uri)
key = uri.getScheme match {
// A JAR file which exists only on the driver node
case null | "file" =>
try {
val file = new File(uri.getPath)
if (!file.exists()) {
throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found")
}
if (file.isDirectory) {
throw new IllegalArgumentException(
s"Directory ${file.getAbsoluteFile} is not allowed for addJar")
}
env.rpcEnv.fileServer.addJar(new File(uri.getPath))
} catch {
case NonFatal(e) =>
logError(s"Failed to add $path to Spark environment", e)
null
}
// A JAR file which exists locally on every worker node
case "local" =>
"file:" + uri.getPath
case _ =>
path
}
}
if (key != null) {
val timestamp = System.currentTimeMillis
if (addedJars.putIfAbsent(key, timestamp).isEmpty) {
logInfo(s"Added JAR $path at $key with timestamp $timestamp")
postEnvironmentUpdate()
}
}
}
}
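  // Hedged usage sketch (illustrative only): shipping an extra JAR to executors before running
  // jobs that need it. The paths below are assumptions made for this example.
  //
  //   sc.addJar("/opt/libs/custom-udfs.jar")          // driver-local JAR, served to executors
  //   sc.addJar("local:/opt/libs/preinstalled.jar")   // already present on every worker node
  //   println(sc.listJars())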
/**
* Returns a list of jar files that are added to resources.
*/
def listJars(): Seq[String] = addedJars.keySet.toSeq
/**
   * When stopping SparkContext inside Spark components, it's easy to cause a deadlock since
   * Spark may wait for some internal threads to finish. It's better to use this method to
   * stop SparkContext instead.
*/
private[spark] def stopInNewThread(): Unit = {
new Thread("stop-spark-context") {
setDaemon(true)
override def run(): Unit = {
try {
SparkContext.this.stop()
} catch {
case e: Throwable =>
logError(e.getMessage, e)
throw e
}
}
}.start()
}
/**
* Shut down the SparkContext.
*/
def stop(): Unit = {
if (LiveListenerBus.withinListenerThread.value) {
throw new SparkException(
s"Cannot stop SparkContext within listener thread of ${LiveListenerBus.name}")
}
// Use the stopping variable to ensure no contention for the stop scenario.
// Still track the stopped variable for use elsewhere in the code.
if (!stopped.compareAndSet(false, true)) {
logInfo("SparkContext already stopped.")
return
}
if (_shutdownHookRef != null) {
ShutdownHookManager.removeShutdownHook(_shutdownHookRef)
}
Utils.tryLogNonFatalError {
postApplicationEnd()
}
Utils.tryLogNonFatalError {
_ui.foreach(_.stop())
}
if (env != null) {
Utils.tryLogNonFatalError {
env.metricsSystem.report()
}
}
Utils.tryLogNonFatalError {
_cleaner.foreach(_.stop())
}
Utils.tryLogNonFatalError {
_executorAllocationManager.foreach(_.stop())
}
if (_listenerBusStarted) {
Utils.tryLogNonFatalError {
listenerBus.stop()
_listenerBusStarted = false
}
}
Utils.tryLogNonFatalError {
_eventLogger.foreach(_.stop())
}
if (_dagScheduler != null) {
Utils.tryLogNonFatalError {
_dagScheduler.stop()
}
_dagScheduler = null
}
if (env != null && _heartbeatReceiver != null) {
Utils.tryLogNonFatalError {
env.rpcEnv.stop(_heartbeatReceiver)
}
}
Utils.tryLogNonFatalError {
_progressBar.foreach(_.stop())
}
_taskScheduler = null
// TODO: Cache.stop()?
if (_env != null) {
Utils.tryLogNonFatalError {
_env.stop()
}
SparkEnv.set(null)
}
// Unset YARN mode system env variable, to allow switching between cluster types.
System.clearProperty("SPARK_YARN_MODE")
SparkContext.clearActiveContext()
logInfo("Successfully stopped SparkContext")
}
/**
* Get Spark's home location from either a value set through the constructor,
* or the spark.home Java property, or the SPARK_HOME environment variable
* (in that order of preference). If neither of these is set, return None.
*/
private[spark] def getSparkHome(): Option[String] = {
conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME")))
}
/**
* Set the thread-local property for overriding the call sites
* of actions and RDDs.
*/
def setCallSite(shortCallSite: String) {
setLocalProperty(CallSite.SHORT_FORM, shortCallSite)
}
/**
* Set the thread-local property for overriding the call sites
* of actions and RDDs.
*/
private[spark] def setCallSite(callSite: CallSite) {
setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm)
setLocalProperty(CallSite.LONG_FORM, callSite.longForm)
}
/**
* Clear the thread-local property for overriding the call sites
* of actions and RDDs.
*/
def clearCallSite() {
setLocalProperty(CallSite.SHORT_FORM, null)
setLocalProperty(CallSite.LONG_FORM, null)
}
/**
* Capture the current user callsite and return a formatted version for printing. If the user
* has overridden the call site using `setCallSite()`, this will return the user's version.
*/
private[spark] def getCallSite(): CallSite = {
lazy val callSite = Utils.getCallSite()
CallSite(
Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm),
Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm)
)
}
/**
* Run a function on a given set of partitions in an RDD and pass the results to the given
* handler function. This is the main entry point for all actions in Spark.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @param resultHandler callback to pass each result to
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
partitions: Seq[Int],
resultHandler: (Int, U) => Unit): Unit = {
if (stopped.get()) {
throw new IllegalStateException("SparkContext has been shutdown")
}
val callSite = getCallSite
val cleanedFunc = clean(func)
logInfo("Starting job: " + callSite.shortForm)
if (conf.getBoolean("spark.logLineage", false)) {
      logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString)
}
dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get)
progressBar.foreach(_.finishAll())
rdd.doCheckpoint()
}
/**
* Run a function on a given set of partitions in an RDD and return the results as an array.
* The function that is run against each partition additionally takes `TaskContext` argument.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
partitions: Seq[Int]): Array[U] = {
val results = new Array[U](partitions.size)
runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res)
results
}
/**
* Run a function on a given set of partitions in an RDD and return the results as an array.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
func: Iterator[T] => U,
partitions: Seq[Int]): Array[U] = {
val cleanedFunc = clean(func)
runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions)
}
/**
* Run a job on all partitions in an RDD and return the results in an array. The function
* that is run against each partition additionally takes `TaskContext` argument.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = {
runJob(rdd, func, 0 until rdd.partitions.length)
}
/**
* Run a job on all partitions in an RDD and return the results in an array.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = {
runJob(rdd, func, 0 until rdd.partitions.length)
}
/**
* Run a job on all partitions in an RDD and pass the results to a handler function. The function
* that is run against each partition additionally takes `TaskContext` argument.
*
* @param rdd target RDD to run tasks on
* @param processPartition a function to run on each partition of the RDD
* @param resultHandler callback to pass each result to
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
processPartition: (TaskContext, Iterator[T]) => U,
resultHandler: (Int, U) => Unit)
{
runJob[T, U](rdd, processPartition, 0 until rdd.partitions.length, resultHandler)
}
/**
* Run a job on all partitions in an RDD and pass the results to a handler function.
*
* @param rdd target RDD to run tasks on
* @param processPartition a function to run on each partition of the RDD
* @param resultHandler callback to pass each result to
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
processPartition: Iterator[T] => U,
resultHandler: (Int, U) => Unit)
{
val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter)
runJob[T, U](rdd, processFunc, 0 until rdd.partitions.length, resultHandler)
}
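  // Hedged usage sketch (illustrative only): the runJob overloads above are what actions such as
  // count() and collect() build on. The RDD and the per-partition function are assumptions.
  //
  //   val data = sc.parallelize(1 to 100, numSlices = 4)
  //   // One result per partition, computed on the executors:
  //   val partialSums: Array[Int] = sc.runJob(data, (it: Iterator[Int]) => it.sum)
  //   val total = partialSums.sum  // combined on the driver
  //   // Or stream results back through a handler instead of materializing an array:
  //   sc.runJob(data, (it: Iterator[Int]) => it.sum,
  //     (index: Int, s: Int) => println(s"partition $index -> $s"))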
/**
* :: DeveloperApi ::
* Run a job that can return approximate results.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param evaluator `ApproximateEvaluator` to receive the partial results
* @param timeout maximum time to wait for the job, in milliseconds
* @return partial result (how partial depends on whether the job was finished before or
* after timeout)
*/
@DeveloperApi
def runApproximateJob[T, U, R](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
evaluator: ApproximateEvaluator[U, R],
timeout: Long): PartialResult[R] = {
assertNotStopped()
val callSite = getCallSite
logInfo("Starting job: " + callSite.shortForm)
val start = System.nanoTime
val cleanedFunc = clean(func)
val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout,
localProperties.get)
logInfo(
"Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s")
result
}
/**
   * Submit a job for execution and return a `SimpleFutureAction` holding the result.
*
* @param rdd target RDD to run tasks on
* @param processPartition a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @param resultHandler callback to pass each result to
* @param resultFunc function to be executed when the result is ready
*/
def submitJob[T, U, R](
rdd: RDD[T],
processPartition: Iterator[T] => U,
partitions: Seq[Int],
resultHandler: (Int, U) => Unit,
resultFunc: => R): SimpleFutureAction[R] =
{
assertNotStopped()
val cleanF = clean(processPartition)
val callSite = getCallSite
val waiter = dagScheduler.submitJob(
rdd,
(context: TaskContext, iter: Iterator[T]) => cleanF(iter),
partitions,
callSite,
resultHandler,
localProperties.get)
new SimpleFutureAction(waiter, resultFunc)
}
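  // Hedged usage sketch (illustrative only): submitJob returns a SimpleFutureAction, so the
  // caller can keep working while the job runs and may cancel it. All names are assumptions.
  //
  //   val data = sc.parallelize(1 to 1000, 8)
  //   val evenCounts = scala.collection.mutable.ArrayBuffer.fill(8)(0)
  //   val future = sc.submitJob(
  //     data,
  //     (it: Iterator[Int]) => it.count(_ % 2 == 0),
  //     0 until 8,
  //     (index: Int, c: Int) => evenCounts(index) = c,
  //     evenCounts.sum)  // resultFunc, evaluated once the job completes
  //   // `future` is also a scala.concurrent.Future; future.cancel() is available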
/**
* Submit a map stage for execution. This is currently an internal API only, but might be
* promoted to DeveloperApi in the future.
*/
private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C])
: SimpleFutureAction[MapOutputStatistics] = {
assertNotStopped()
val callSite = getCallSite()
var result: MapOutputStatistics = null
val waiter = dagScheduler.submitMapStage(
dependency,
(r: MapOutputStatistics) => { result = r },
callSite,
localProperties.get)
new SimpleFutureAction[MapOutputStatistics](waiter, result)
}
/**
* Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup`
* for more information.
*/
def cancelJobGroup(groupId: String) {
assertNotStopped()
dagScheduler.cancelJobGroup(groupId)
}
/** Cancel all jobs that have been scheduled or are running. */
def cancelAllJobs() {
assertNotStopped()
dagScheduler.cancelAllJobs()
}
/**
* Cancel a given job if it's scheduled or running.
*
* @param jobId the job ID to cancel
* @param reason optional reason for cancellation
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
def cancelJob(jobId: Int, reason: String): Unit = {
dagScheduler.cancelJob(jobId, Option(reason))
}
/**
* Cancel a given job if it's scheduled or running.
*
* @param jobId the job ID to cancel
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
def cancelJob(jobId: Int): Unit = {
dagScheduler.cancelJob(jobId, None)
}
/**
* Cancel a given stage and all jobs associated with it.
*
* @param stageId the stage ID to cancel
* @param reason reason for cancellation
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
def cancelStage(stageId: Int, reason: String): Unit = {
dagScheduler.cancelStage(stageId, Option(reason))
}
/**
* Cancel a given stage and all jobs associated with it.
*
* @param stageId the stage ID to cancel
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
def cancelStage(stageId: Int): Unit = {
dagScheduler.cancelStage(stageId, None)
}
/**
   * Clean a closure to make it ready to be serialized and sent to tasks
* (removes unreferenced variables in $outer's, updates REPL variables)
* If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively
* check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt>
* if not.
*
* @param f the closure to clean
* @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability
* @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not
* serializable
* @return the cleaned closure
*/
private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = {
ClosureCleaner.clean(f, checkSerializable)
f
}
/**
* Set the directory under which RDDs are going to be checkpointed.
* @param directory path to the directory where checkpoint files will be stored
   * (must be an HDFS path if running on a cluster)
*/
def setCheckpointDir(directory: String) {
// If we are running on a cluster, log a warning if the directory is local.
// Otherwise, the driver may attempt to reconstruct the checkpointed RDD from
// its own local file system, which is incorrect because the checkpoint files
// are actually on the executor machines.
if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) {
logWarning("Spark is not running in local mode, therefore the checkpoint directory " +
s"must not be on the local filesystem. Directory '$directory' " +
"appears to be on the local filesystem.")
}
checkpointDir = Option(directory).map { dir =>
val path = new Path(dir, UUID.randomUUID().toString)
val fs = path.getFileSystem(hadoopConfiguration)
fs.mkdirs(path)
fs.getFileStatus(path).getPath.toString
}
}
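  // Hedged usage sketch (illustrative only): a typical checkpointing flow. The HDFS path and the
  // RDD lineage below are assumptions made for this example.
  //
  //   sc.setCheckpointDir("hdfs:///tmp/spark-checkpoints")
  //   val rdd = sc.parallelize(1 to 10).map(_ * 2).cache()
  //   rdd.checkpoint()  // marked; checkpoint files are written when the next action runs
  //   rdd.count()       // triggers the job and materializes the checkpoint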
def getCheckpointDir: Option[String] = checkpointDir
/** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */
def defaultParallelism: Int = {
assertNotStopped()
taskScheduler.defaultParallelism
}
/**
* Default min number of partitions for Hadoop RDDs when not given by user
* Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2.
* The reasons for this are discussed in https://github.com/mesos/spark/pull/718
*/
def defaultMinPartitions: Int = math.min(defaultParallelism, 2)
private val nextShuffleId = new AtomicInteger(0)
private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement()
private val nextRddId = new AtomicInteger(0)
/** Register a new RDD, returning its RDD ID */
private[spark] def newRddId(): Int = nextRddId.getAndIncrement()
/**
* Registers listeners specified in spark.extraListeners, then starts the listener bus.
* This should be called after all internal listeners have been registered with the listener bus
* (e.g. after the web UI and event logging listeners have been registered).
*/
private def setupAndStartListenerBus(): Unit = {
// Use reflection to instantiate listeners specified via `spark.extraListeners`
try {
val listenerClassNames: Seq[String] =
conf.get("spark.extraListeners", "").split(',').map(_.trim).filter(_ != "")
for (className <- listenerClassNames) {
// Use reflection to find the right constructor
val constructors = {
val listenerClass = Utils.classForName(className)
listenerClass
.getConstructors
.asInstanceOf[Array[Constructor[_ <: SparkListenerInterface]]]
}
val constructorTakingSparkConf = constructors.find { c =>
c.getParameterTypes.sameElements(Array(classOf[SparkConf]))
}
lazy val zeroArgumentConstructor = constructors.find { c =>
c.getParameterTypes.isEmpty
}
val listener: SparkListenerInterface = {
if (constructorTakingSparkConf.isDefined) {
constructorTakingSparkConf.get.newInstance(conf)
} else if (zeroArgumentConstructor.isDefined) {
zeroArgumentConstructor.get.newInstance()
} else {
throw new SparkException(
s"$className did not have a zero-argument constructor or a" +
" single-argument constructor that accepts SparkConf. Note: if the class is" +
" defined inside of another Scala class, then its constructors may accept an" +
" implicit parameter that references the enclosing class; in this case, you must" +
" define the listener as a top-level class in order to prevent this extra" +
" parameter from breaking Spark's ability to find a valid constructor.")
}
}
listenerBus.addListener(listener)
logInfo(s"Registered listener $className")
}
} catch {
case e: Exception =>
try {
stop()
} finally {
throw new SparkException(s"Exception when registering SparkListener", e)
}
}
listenerBus.start()
_listenerBusStarted = true
}
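  // Hedged configuration sketch (illustrative only): a listener loaded through
  // spark.extraListeners must expose either a zero-argument constructor or one taking a single
  // SparkConf, as required by the reflection code above. The class and settings are assumptions.
  //
  //   class AuditListener(conf: SparkConf) extends org.apache.spark.scheduler.SparkListener {
  //     override def onApplicationEnd(end: SparkListenerApplicationEnd): Unit =
  //       println(s"${conf.get("spark.app.name", "app")} ended at ${end.time}")
  //   }
  //   // spark-submit --conf spark.extraListeners=com.example.AuditListener ...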
/** Post the application start event */
private def postApplicationStart() {
// Note: this code assumes that the task scheduler has been initialized and has contacted
// the cluster manager to get an application ID (in case the cluster manager provides one).
listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId),
startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls))
}
/** Post the application end event */
private def postApplicationEnd() {
listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis))
}
/** Post the environment update event once the task scheduler is ready */
private def postEnvironmentUpdate() {
if (taskScheduler != null) {
val schedulingMode = getSchedulingMode.toString
val addedJarPaths = addedJars.keys.toSeq
val addedFilePaths = addedFiles.keys.toSeq
val environmentDetails = SparkEnv.environmentDetails(conf, schedulingMode, addedJarPaths,
addedFilePaths)
val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails)
listenerBus.post(environmentUpdate)
}
}
// In order to prevent multiple SparkContexts from being active at the same time, mark this
// context as having finished construction.
// NOTE: this must be placed at the end of the SparkContext constructor.
SparkContext.setActiveContext(this, allowMultipleContexts)
}
/**
* The SparkContext object contains a number of implicit conversions and parameters for use with
* various Spark features.
*/
object SparkContext extends Logging {
private val VALID_LOG_LEVELS =
Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN")
/**
* Lock that guards access to global variables that track SparkContext construction.
*/
private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object()
/**
* The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`.
*
* Access to this field is guarded by SPARK_CONTEXT_CONSTRUCTOR_LOCK.
*/
private val activeContext: AtomicReference[SparkContext] =
new AtomicReference[SparkContext](null)
/**
* Points to a partially-constructed SparkContext if some thread is in the SparkContext
* constructor, or `None` if no SparkContext is being constructed.
*
* Access to this field is guarded by SPARK_CONTEXT_CONSTRUCTOR_LOCK
*/
private var contextBeingConstructed: Option[SparkContext] = None
/**
* Called to ensure that no other SparkContext is running in this JVM.
*
* Throws an exception if a running context is detected and logs a warning if another thread is
* constructing a SparkContext. This warning is necessary because the current locking scheme
* prevents us from reliably distinguishing between cases where another context is being
* constructed and cases where another constructor threw an exception.
*/
private def assertNoOtherContextIsRunning(
sc: SparkContext,
allowMultipleContexts: Boolean): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
Option(activeContext.get()).filter(_ ne sc).foreach { ctx =>
val errMsg = "Only one SparkContext may be running in this JVM (see SPARK-2243)." +
" To ignore this error, set spark.driver.allowMultipleContexts = true. " +
          s"The currently running SparkContext was created at:\n${ctx.creationSite.longForm}"
val exception = new SparkException(errMsg)
if (allowMultipleContexts) {
logWarning("Multiple running SparkContexts detected in the same JVM!", exception)
} else {
throw exception
}
}
contextBeingConstructed.filter(_ ne sc).foreach { otherContext =>
// Since otherContext might point to a partially-constructed context, guard against
// its creationSite field being null:
val otherContextCreationSite =
Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location")
val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" +
" constructor). This may indicate an error, since only one SparkContext may be" +
" running in this JVM (see SPARK-2243)." +
          s" The other SparkContext was created at:\n$otherContextCreationSite"
logWarning(warnMsg)
}
}
}
/**
* This function may be used to get or instantiate a SparkContext and register it as a
* singleton object. Because we can only have one active SparkContext per JVM,
* this is useful when applications may wish to share a SparkContext.
*
* @note This function cannot be used to create multiple SparkContext instances
* even if multiple contexts are allowed.
   * @param config `SparkConf` that will be used for initialization of the `SparkContext`
* @return current `SparkContext` (or a new one if it wasn't created before the function call)
*/
def getOrCreate(config: SparkConf): SparkContext = {
// Synchronize to ensure that multiple create requests don't trigger an exception
// from assertNoOtherContextIsRunning within setActiveContext
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
if (activeContext.get() == null) {
setActiveContext(new SparkContext(config), allowMultipleContexts = false)
} else {
if (config.getAll.nonEmpty) {
logWarning("Using an existing SparkContext; some configuration may not take effect.")
}
}
activeContext.get()
}
}
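  // Hedged usage sketch (illustrative only): sharing one context across library code with
  // getOrCreate. The app name and master URL are assumptions made for this example.
  //
  //   val conf = new SparkConf().setAppName("shared-app").setMaster("local[2]")
  //   val sc1 = SparkContext.getOrCreate(conf)
  //   val sc2 = SparkContext.getOrCreate()  // returns the same instance; no new context is built
  //   assert(sc1 eq sc2)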
/**
* This function may be used to get or instantiate a SparkContext and register it as a
* singleton object. Because we can only have one active SparkContext per JVM,
* this is useful when applications may wish to share a SparkContext.
*
* This method allows not passing a SparkConf (useful if just retrieving).
*
* @note This function cannot be used to create multiple SparkContext instances
* even if multiple contexts are allowed.
   * @return current `SparkContext` (or a new one if it wasn't created before the function call)
*/
def getOrCreate(): SparkContext = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
if (activeContext.get() == null) {
setActiveContext(new SparkContext(), allowMultipleContexts = false)
}
activeContext.get()
}
}
/** Return the current active [[SparkContext]] if any. */
private[spark] def getActive: Option[SparkContext] = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
Option(activeContext.get())
}
}
/**
* Called at the beginning of the SparkContext constructor to ensure that no SparkContext is
* running. Throws an exception if a running context is detected and logs a warning if another
* thread is constructing a SparkContext. This warning is necessary because the current locking
* scheme prevents us from reliably distinguishing between cases where another context is being
* constructed and cases where another constructor threw an exception.
*/
private[spark] def markPartiallyConstructed(
sc: SparkContext,
allowMultipleContexts: Boolean): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
assertNoOtherContextIsRunning(sc, allowMultipleContexts)
contextBeingConstructed = Some(sc)
}
}
/**
* Called at the end of the SparkContext constructor to ensure that no other SparkContext has
* raced with this constructor and started.
*/
private[spark] def setActiveContext(
sc: SparkContext,
allowMultipleContexts: Boolean): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
assertNoOtherContextIsRunning(sc, allowMultipleContexts)
contextBeingConstructed = None
activeContext.set(sc)
}
}
/**
* Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's
* also called in unit tests to prevent a flood of warnings from test suites that don't / can't
* properly clean up their SparkContexts.
*/
private[spark] def clearActiveContext(): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
activeContext.set(null)
}
}
private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description"
private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id"
private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel"
private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope"
private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride"
/**
* Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was
* changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see
* SPARK-6716 for more details).
*/
private[spark] val DRIVER_IDENTIFIER = "driver"
/**
* Legacy version of DRIVER_IDENTIFIER, retained for backwards-compatibility.
*/
private[spark] val LEGACY_DRIVER_IDENTIFIER = "<driver>"
private implicit def arrayToArrayWritable[T <% Writable: ClassTag](arr: Traversable[T])
: ArrayWritable = {
def anyToWritable[U <% Writable](u: U): Writable = u
new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]],
arr.map(x => anyToWritable(x)).toArray)
}
/**
* Find the JAR from which a given class was loaded, to make it easy for users to pass
* their JARs to SparkContext.
*
* @param cls class that should be inside of the jar
* @return jar that contains the Class, `None` if not found
*/
def jarOfClass(cls: Class[_]): Option[String] = {
val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class")
if (uri != null) {
val uriStr = uri.toString
if (uriStr.startsWith("jar:file:")) {
// URI will be of the form "jar:file:/path/foo.jar!/package/cls.class",
// so pull out the /path/foo.jar
Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!')))
} else {
None
}
} else {
None
}
}
/**
* Find the JAR that contains the class of a particular object, to make it easy for users
* to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in
* your driver program.
*
* @param obj reference to an instance which class should be inside of the jar
* @return jar that contains the class of the instance, `None` if not found
*/
def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass)
/**
* Creates a modified version of a SparkConf with the parameters that can be passed separately
* to SparkContext, to make it easier to write SparkContext's constructors. This ignores
* parameters that are passed as the default value of null, instead of throwing an exception
* like SparkConf would.
*/
private[spark] def updatedConf(
conf: SparkConf,
master: String,
appName: String,
sparkHome: String = null,
jars: Seq[String] = Nil,
environment: Map[String, String] = Map()): SparkConf =
{
val res = conf.clone()
res.setMaster(master)
res.setAppName(appName)
if (sparkHome != null) {
res.setSparkHome(sparkHome)
}
if (jars != null && !jars.isEmpty) {
res.setJars(jars)
}
res.setExecutorEnv(environment.toSeq)
res
}
/**
* The number of driver cores to use for execution in local mode, 0 otherwise.
*/
private[spark] def numDriverCores(master: String): Int = {
def convertToInt(threads: String): Int = {
if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt
}
master match {
case "local" => 1
case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads)
case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads)
case _ => 0 // driver is not used for execution
}
}
/**
* Create a task scheduler based on a given master URL.
* Return a 2-tuple of the scheduler backend and the task scheduler.
*/
private def createTaskScheduler(
sc: SparkContext,
master: String,
deployMode: String): (SchedulerBackend, TaskScheduler) = {
import SparkMasterRegex._
// When running locally, don't try to re-execute tasks on failure.
val MAX_LOCAL_TASK_FAILURES = 1
master match {
case "local" =>
val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true)
val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1)
scheduler.initialize(backend)
(backend, scheduler)
case LOCAL_N_REGEX(threads) =>
def localCpuCount: Int = Runtime.getRuntime.availableProcessors()
// local[*] estimates the number of cores on the machine; local[N] uses exactly N threads.
val threadCount = if (threads == "*") localCpuCount else threads.toInt
if (threadCount <= 0) {
throw new SparkException(s"Asked to run locally with $threadCount threads")
}
val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true)
val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount)
scheduler.initialize(backend)
(backend, scheduler)
case LOCAL_N_FAILURES_REGEX(threads, maxFailures) =>
def localCpuCount: Int = Runtime.getRuntime.availableProcessors()
// local[*, M] means the number of cores on the computer with M failures
// local[N, M] means exactly N threads with M failures
val threadCount = if (threads == "*") localCpuCount else threads.toInt
val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true)
val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount)
scheduler.initialize(backend)
(backend, scheduler)
case SPARK_REGEX(sparkUrl) =>
val scheduler = new TaskSchedulerImpl(sc)
val masterUrls = sparkUrl.split(",").map("spark://" + _)
val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls)
scheduler.initialize(backend)
(backend, scheduler)
case LOCAL_CLUSTER_REGEX(numSlaves, coresPerSlave, memoryPerSlave) =>
// Check to make sure memory requested <= memoryPerSlave. Otherwise Spark will just hang.
val memoryPerSlaveInt = memoryPerSlave.toInt
if (sc.executorMemory > memoryPerSlaveInt) {
throw new SparkException(
"Asked to launch cluster with %d MB RAM / worker but requested %d MB/worker".format(
memoryPerSlaveInt, sc.executorMemory))
}
val scheduler = new TaskSchedulerImpl(sc)
val localCluster = new LocalSparkCluster(
numSlaves.toInt, coresPerSlave.toInt, memoryPerSlaveInt, sc.conf)
val masterUrls = localCluster.start()
val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls)
scheduler.initialize(backend)
backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => {
localCluster.stop()
}
(backend, scheduler)
case masterUrl =>
val cm = getClusterManager(masterUrl) match {
case Some(clusterMgr) => clusterMgr
case None => throw new SparkException("Could not parse Master URL: '" + master + "'")
}
try {
val scheduler = cm.createTaskScheduler(sc, masterUrl)
val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler)
cm.initialize(scheduler, backend)
(backend, scheduler)
} catch {
case se: SparkException => throw se
case NonFatal(e) =>
throw new SparkException("External scheduler cannot be instantiated", e)
}
}
}
private def getClusterManager(url: String): Option[ExternalClusterManager] = {
val loader = Utils.getContextOrSparkClassLoader
val serviceLoaders =
ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url))
if (serviceLoaders.size > 1) {
throw new SparkException(
s"Multiple external cluster managers registered for the url $url: $serviceLoaders")
}
serviceLoaders.headOption
}
}
/**
* A collection of regexes for extracting information from the master string.
*/
private object SparkMasterRegex {
// Regular expression used for local[N] and local[*] master formats
  val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r
// Regular expression for local[N, maxRetries], used in tests with failing tasks
  val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r
// Regular expression for simulating a Spark cluster of [N, cores, memory] locally
  val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r
// Regular expression for connecting to Spark deploy clusters
val SPARK_REGEX = """spark://(.*)""".r
}
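// Hedged illustration (added; not part of the original source): master strings that the regexes
// above accept, as consumed by createTaskScheduler. The concrete values are examples only.
//
//   "local"                          // single thread, no task retries
//   "local[4]", "local[*]"           // LOCAL_N_REGEX: fixed thread count, or one per core
//   "local[4, 3]"                    // LOCAL_N_FAILURES_REGEX: 4 threads, up to 3 task failures
//   "local-cluster[2, 1, 1024]"      // LOCAL_CLUSTER_REGEX: 2 workers, 1 core and 1024 MB each
//   "spark://host1:7077,host2:7077"  // SPARK_REGEX: standalone master URL(s)
//
//   "local[4, 3]" match {
//     case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, maxFailures) =>
//       (threads.toInt, maxFailures.toInt)  // yields (4, 3)
//   }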
/**
* A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable`
* class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the
* conversion.
* The getter for the writable class takes a `ClassTag[T]` in case this is a generic object
* that doesn't know the type of `T` when it is created. This sounds strange but is necessary to
* support converting subclasses of `Writable` to themselves (`writableWritableConverter()`).
*/
private[spark] class WritableConverter[T](
val writableClass: ClassTag[T] => Class[_ <: Writable],
val convert: Writable => T)
extends Serializable
object WritableConverter {
// Helper objects for converting common types to Writable
private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T)
: WritableConverter[T] = {
val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]]
new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W]))
}
// The following implicit functions were in SparkContext before 1.3 and users had to
// `import SparkContext._` to enable them. Now we move them here to make the compiler find
// them automatically. However, we still keep the old functions in SparkContext for backward
// compatibility and forward to the following functions directly.
implicit def intWritableConverter(): WritableConverter[Int] =
simpleWritableConverter[Int, IntWritable](_.get)
implicit def longWritableConverter(): WritableConverter[Long] =
simpleWritableConverter[Long, LongWritable](_.get)
implicit def doubleWritableConverter(): WritableConverter[Double] =
simpleWritableConverter[Double, DoubleWritable](_.get)
implicit def floatWritableConverter(): WritableConverter[Float] =
simpleWritableConverter[Float, FloatWritable](_.get)
implicit def booleanWritableConverter(): WritableConverter[Boolean] =
simpleWritableConverter[Boolean, BooleanWritable](_.get)
implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = {
simpleWritableConverter[Array[Byte], BytesWritable] { bw =>
      // getBytes returns an array that is longer than the data to be returned
Arrays.copyOfRange(bw.getBytes, 0, bw.getLength)
}
}
implicit def stringWritableConverter(): WritableConverter[String] =
simpleWritableConverter[String, Text](_.toString)
implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] =
new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])
}
/**
* A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable`
* class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the
* conversion.
* The `Writable` class will be used in `SequenceFileRDDFunctions`.
*/
private[spark] class WritableFactory[T](
val writableClass: ClassTag[T] => Class[_ <: Writable],
val convert: T => Writable) extends Serializable
object WritableFactory {
private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W)
: WritableFactory[T] = {
val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]]
new WritableFactory[T](_ => writableClass, convert)
}
implicit def intWritableFactory: WritableFactory[Int] =
simpleWritableFactory(new IntWritable(_))
implicit def longWritableFactory: WritableFactory[Long] =
simpleWritableFactory(new LongWritable(_))
implicit def floatWritableFactory: WritableFactory[Float] =
simpleWritableFactory(new FloatWritable(_))
implicit def doubleWritableFactory: WritableFactory[Double] =
simpleWritableFactory(new DoubleWritable(_))
implicit def booleanWritableFactory: WritableFactory[Boolean] =
simpleWritableFactory(new BooleanWritable(_))
implicit def bytesWritableFactory: WritableFactory[Array[Byte]] =
simpleWritableFactory(new BytesWritable(_))
implicit def stringWritableFactory: WritableFactory[String] =
simpleWritableFactory(new Text(_))
implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] =
simpleWritableFactory(w => w)
}
|
jianran/spark
|
core/src/main/scala/org/apache/spark/SparkContext.scala
|
Scala
|
apache-2.0
| 116,478 |
package net.ultrametrics.fractactor
import net.ultrametrics.math.Complex
/**
* A trait for any holomorphic function that can be calculated from a
* point in the complex plane.
*/
trait HolomorphicFunction
{
/**
* Calculate the function at a single point.
* @param c a point in the complex plane
* @return an evaluation of the function
*/
def calculate(c: Complex): Int = c.scalar.toInt
}
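// Hedged sketch (added for illustration; not from the original repository): one possible concrete
// implementation of the trait. It assumes Complex offers a (re, im) constructor, `+`, `*`, and a
// magnitude-like `scalar` accessor; only `scalar` is confirmed by the trait above.
//
//   class MandelbrotFunction(maxIterations: Int) extends HolomorphicFunction {
//     override def calculate(c: Complex): Int = {
//       var z = new Complex(0.0, 0.0)
//       var i = 0
//       while (i < maxIterations && z.scalar < 2.0) { z = z * z + c; i += 1 }
//       i  // escape-time iteration count, usable as a pixel value
//     }
//   }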
|
pchuck/fractactor
|
src/main/scala/net/ultrametrics/fractactor/HolomorphicFunction.scala
|
Scala
|
bsd-2-clause
| 409 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.internal.broker
import akka.persistence.query.Offset
import akka.stream.scaladsl.Source
import com.lightbend.lagom.scaladsl.api.broker.Subscriber
import com.lightbend.lagom.scaladsl.api.broker.Topic
import com.lightbend.lagom.scaladsl.persistence.AggregateEvent
import com.lightbend.lagom.scaladsl.persistence.AggregateEventTag
import scala.collection.immutable
trait InternalTopic[Message] extends Topic[Message] {
final override def topicId: Topic.TopicId =
throw new UnsupportedOperationException("Topic#topicId is not permitted in the service's topic implementation")
final override def subscribe: Subscriber[Message] =
throw new UnsupportedOperationException("Topic#subscribe is not permitted in the service's topic implementation.")
}
final class TaggedOffsetTopicProducer[Message, Event <: AggregateEvent[Event]](
val tags: immutable.Seq[AggregateEventTag[Event]],
val readSideStream: (AggregateEventTag[Event], Offset) => Source[(Message, Offset), _]
) extends InternalTopic[Message]
|
rcavalcanti/lagom
|
service/scaladsl/broker/src/main/scala/com/lightbend/internal/broker/TopicProducers.scala
|
Scala
|
apache-2.0
| 1,117 |
trait BankMessage {
def accountId: String
}
case class Open(accountId: String) extends BankMessage
case class Close(accountId: String) extends BankMessage
case class Deposit(accountId: String, amount: Double) extends BankMessage
case class Withdrawal(accountId: String, amount: Double) extends BankMessage
case class GetBalance(accountId: String) extends BankMessage
case class Transfer(accountId: String, toId: String, amount: Double) extends BankMessage
|
timschlechter/akka-demos
|
scala/src/main/scala/Messages.scala
|
Scala
|
mit
| 459 |
package org.geneticmachine.machine
import org.geneticmachine._
import scala.concurrent.Future
final class SimpleMachine[+C <: ExecutionContext]
(implicit val executionContext: C, implicit val db: DBDriver)
extends Machine[C] {
val log = executionContext.logger
def submit[I, O, S, C1 >: C <: ExecutionContext](experiment: Experiment[I, O, S, C1]): Future[ExperimentResult[S]] = {
import executionContext.futureExecutionContext
log.info {
      s"Executing:\n${experiment.scheme}"
}
def foldExperiment(ex: Experiment[I, O, S, C1], acc: List[PairResult[S]]): Future[ExperimentResult[S]] = {
val nextOpt = ex.next(ExperimentResult(acc))
if (nextOpt.isDefined) {
val (pair, nextPart) = nextOpt.get
val pairResult = executionContext.submit(pair)(db)
pairResult.flatMap { pr: PairResult[S] =>
val acc1 = pr :: acc
foldExperiment(nextPart, acc1)
}
} else {
Future.successful(ExperimentResult(acc))
}
}
foldExperiment(experiment, List.empty)
}
}
|
ZloVechno/genetic-machine
|
src/main/scala/org/geneticmachine/machine/SimpleMachine.scala
|
Scala
|
mit
| 1,068 |
// Copyright 2014 Foursquare Labs Inc. All Rights Reserved.
package io.fsq.spindle.codegen.runtime.test
import io.fsq.spindle.codegen.runtime.enhanced_types.test.gen.{BSONObjectFields, ObjectIdFields, UUIDFields}
import io.fsq.spindle.runtime.{KnownTProtocolNames, TProtocolInfo}
import java.util.UUID
import org.apache.thrift.TBase
import org.apache.thrift.transport.TMemoryBuffer
import org.bson.BasicBSONObject
import org.bson.types.ObjectId
import org.junit.Assert.assertEquals
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import org.junit.runners.Parameterized.Parameters
import scala.collection.JavaConverters._
@RunWith(value = classOf[Parameterized])
class EnhancedTypesTest(tproto: String) {
private def roundTripTest[T <: TBase[_, _]](source: T, dest: T): Unit = {
val trans = new TMemoryBuffer(1024)
val writeProtocol = {
val protocolFactory = TProtocolInfo.getWriterFactory(tproto)
protocolFactory.getProtocol(trans)
}
source.write(writeProtocol)
val readProtocol = {
val protocolFactory = TProtocolInfo.getReaderFactory(tproto)
protocolFactory.getProtocol(trans)
}
dest.read(readProtocol)
assertEquals(source, dest)
}
@Test
def objectIdFields(): Unit = {
val src = ObjectIdFields.newBuilder
.foo(new ObjectId())
.bar(new ObjectId() :: new ObjectId() :: Nil)
.baz(Map("A" -> new ObjectId(), "B" -> new ObjectId()))
.result()
roundTripTest(src, ObjectIdFields.createRawRecord)
}
@Test
def bsonObjectFields(): Unit = {
val bso = new BasicBSONObject()
bso.put("foo", "bar")
val src = BSONObjectFields.newBuilder
.bso(bso)
.result()
val dest = BSONObjectFields.createRawRecord
roundTripTest(src, dest)
assertEquals(src.bsoOrNull.get("foo"), "bar")
assertEquals(dest.bsoOrNull.get("foo"), "bar")
}
@Test
def uuidFields(): Unit = {
val src = UUIDFields.newBuilder
.qux(UUID.fromString("cba096a8-2e96-4668-9308-3086591201a7"))
.quux(
Vector(
UUID.fromString("14edb439-75e3-4cd8-9175-b4460815670e"),
UUID.fromString("d86405f4-0003-44af-96d9-22368e53f116")
)
)
.norf(
Map(
"A" -> UUID.fromString("31b0db7d-2de7-4aa7-b3c1-a07d649f5770"),
"B" -> UUID.fromString("9a3685fd-b2ef-4401-b9bc-c1849c280499")
)
)
.result()
roundTripTest(src, UUIDFields.createRawRecord)
}
}
object EnhancedTypesTest {
@Parameters(name = "tproto={0}")
def parameters: java.util.List[String] = {
Vector(
KnownTProtocolNames.TBinaryProtocol,
KnownTProtocolNames.TCompactProtocol,
KnownTProtocolNames.TJSONProtocol,
KnownTProtocolNames.TBSONProtocol,
KnownTProtocolNames.TBSONBinaryProtocol,
KnownTProtocolNames.TReadableJSONProtocol
).asJava
}
}
|
foursquare/fsqio
|
test/jvm/io/fsq/spindle/codegen/runtime/test/EnhancedTypesTest.scala
|
Scala
|
apache-2.0
| 2,898 |
import scala.util.parsing.combinator._
class Arith extends JavaTokenParsers {
def expr: Parser[Any] = term ~ rep("+" ~ term | "-" ~ term)
def term: Parser[Any] = factor ~ rep("*" ~ factor | "/" ~ factor)
def factor: Parser[Any] = floatingPointNumber | "("~expr~")"
}
object ParseExpr extends Arith {
def main(args: Array[String]) {
println("input : "+ args(0))
println(parseAll(expr, args(0)))
}
}
|
sanjosh/scala
|
dsl-demo/src/main/scala/ParseExpr.scala
|
Scala
|
apache-2.0
| 420 |
package models
import com.lukaspradel.steamapi.data.json.playersummaries.Player
import models.ServiceProfile.ProfileState
import reactivemongo.bson.{ BSONDocument, BSONDocumentReader, BSONDocumentWriter }
/**
* Created by henrik on 2017-02-26.
*/
trait SteamProfile extends ServiceProfile {
override type Self = SteamProfile
def service: String = "Steam"
}
trait SteamProfileFactory {
def apply(profileData: Player): SteamProfile
}
object SteamProfile {
implicit object Writer extends BSONDocumentWriter[SteamProfile] {
def write(profile: SteamProfile): BSONDocument = BSONDocument(
"id" -> profile.id,
"service" -> profile.service,
"visible" -> profile.visible,
"displayName" -> profile.displayName,
"avatarUrl" -> profile.avatarUrl,
"profileState" -> profile.profileState,
"isInGame" -> profile.isInGame,
"currentlyPlaying" -> profile.currentlyPlaying
)
}
implicit object Reader extends BSONDocumentReader[SteamProfile] {
def read(doc: BSONDocument): SteamProfile = {
SteamProfileImpl(
doc.getAs[String]("id").get,
doc.getAs[Boolean]("visible").get,
doc.getAs[String]("displayName").get,
doc.getAs[String]("avatarUrl").get,
doc.getAs[ProfileState]("profileState").get,
doc.getAs[Game]("currentlyPlaying").get,
doc.getAs[Boolean]("isRegistered").get
)
}
}
}
|
hnrklssn/game-check-match
|
app/models/SteamProfile.scala
|
Scala
|
apache-2.0
| 1,447 |
//
// Scaled TODO Mode - a Scaled major mode for editing TODO files
// http://github.com/scaled/todo-mode/blob/master/LICENSE
package scaled.todo
import scaled._
import scaled.grammar._
import scaled.major.TextConfig
object TodoConfig extends Config.Defs {
/** The CSS style applied to `done` list entries. */
val doneStyle = "textDoneFace"
}
@Plugin(tag="textmate-grammar")
class TodoGrammarPlugin extends GrammarPlugin {
import TextConfig._
import TodoConfig._
def grammars = Map("text.todo" -> "unused")
override def grammar (scopeName :String) = scopeName match {
case "text.todo" => new Grammar(
name = "TODO",
scopeName = "text.todo",
foldingStartMarker = None,
foldingStopMarker = None) {
val repository = Map[String,Rule]()
// we have to specify a return type here to work around scalac bug; meh
val patterns :List[Rule] = List(
        single("""^\* .*$""", name=Some("markup.heading")),
        single("""^\*\* .*$""", name=Some("markup.subheading")),
        single("""^\s*- .*$""", name=Some("markup.list")),
        single("""^\s*x .*$""", name=Some("markup.list.complete"))
)
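      // Illustrative lines each rule above would match (example content, not from this file):
      //   "* Release 1.2"     -> markup.heading
      //   "** Blocking bugs"  -> markup.subheading
      //   "  - write docs"    -> markup.list
      //   "  x fix CI"        -> markup.list.complete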
}
case _ => super.grammar(scopeName)
}
override def effacers = List(
effacer("markup.heading", headerStyle),
effacer("markup.subheading", subHeaderStyle),
// effacer("markup.list", listStyle),
effacer("markup.list.complete", doneStyle)
)
}
@Major(name="todo",
tags=Array("text", "project", "todo"),
pats=Array("TODO.*"),
desc="A major mode for editing TODO files.")
class TodoMode (env :Env) extends GrammarTextMode(env) {
override def dispose () :Unit = {} // nada for now
override def configDefs = TodoConfig :: super.configDefs
override def stylesheets = stylesheetURL("/todo.css") :: super.stylesheets
override def langScope = "text.todo"
}
|
scaled/todo-mode
|
src/scala/scaled/todo/TodoMode.scala
|
Scala
|
bsd-3-clause
| 1,877 |
package models
import java.util.UUID.fromString
import base.SemanticFeatureSpec
import utils.semantic.{ Vocabulary, Resource }
import utils.Implicits._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
class TimetableSpec extends SemanticFeatureSpec {
val timetable1 = Timetable(Resource("http://labwork_1.com"), fromString("cca9c110-9d02-11e4-bd06-0800200c9a66"))
val timetable2 = Timetable(Resource("http://labwork_1.com"), fromString("d2781ce0-9d02-11e4-bd06-0800200c9a66"))
val timetable3 = Timetable(Resource("http://labwork_3.com"), fromString("d998c240-9d02-11e4-bd06-0800200c9a66"))
val expectedResource1 = Resource(s"http://lwm.gm.fh-koeln.de/timetables/${fromString("cca9c110-9d02-11e4-bd06-0800200c9a66")}")
val expectedResource2 = Resource(s"http://lwm.gm.fh-koeln.de/timetables/${fromString("d2781ce0-9d02-11e4-bd06-0800200c9a66")}")
val expectedResource3 = Resource(s"http://lwm.gm.fh-koeln.de/timetables/${fromString("d998c240-9d02-11e4-bd06-0800200c9a66")}")
"Timetables" should {
"return the number of timetables" in {
val expected = 0
eventually {
Timetables.size should be(expected)
}
}
"create a new timetable" in {
val futureTimetable = Timetables.create(timetable1)
whenReady(futureTimetable) { timetable ⇒
timetable should be(expectedResource1)
Timetables.size should be(1)
s"""
|${Vocabulary.defaultPrefixes}
|
|select * where {
|
| $expectedResource1 rdf:type lwm:Timetable .
| $expectedResource1 lwm:hasId ?id .
| $expectedResource1 lwm:hasLabWork ?labwork .
|}
""".stripMargin.execSelect().map { solution ⇒
val id = solution.data("id").asLiteral().getString
val labwork = solution.data("labwork").asResource().getURI
(id, labwork)
} should contain theSameElementsAs List((timetable1.id.toString, timetable1.labwork.value))
}
}
"get timetable from resource" in {
"drop all".execUpdate()
val futureTimetables = Timetables.create(timetable1) :: Timetables.create(timetable2) :: Nil
whenReady(Future.sequence(futureTimetables)) { timetables ⇒
Timetables.get(expectedResource1) should be(Option(timetable1))
Timetables.get(expectedResource3) should be(None)
}
}
"return a list of all timetables" in {
"drop all".execUpdate()
val futureTimetables = Timetables.create(timetable1) ::
Timetables.create(timetable2) ::
Timetables.create(timetable3) :: Nil
whenReady(Future.sequence(futureTimetables)) { timetables ⇒
whenReady(Timetables.all()) { all ⇒
all should contain theSameElementsAs List(expectedResource1, expectedResource2, expectedResource3)
}
}
}
"should return true if an arbitrary resource is really a timetable" in {
Timetables.check(expectedResource1) should be(right = true)
}
}
}
|
FHK-ADV/lwm
|
test/models/TimetableSpec.scala
|
Scala
|
mit
| 3,047 |
/**
* Copyright (C) 2011 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fb
import org.orbeon.saxon.om.NodeInfo
import org.orbeon.oxf.xforms.action.XFormsAPI._
import org.orbeon.scaxon.XML._
import org.orbeon.oxf.xforms.XFormsUtils
import org.orbeon.oxf.fr.FormRunner._
trait ContainerOps extends ControlOps {
self: GridOps ⇒ // funky dependency, to resolve at some point
def containerById(containerId: String): NodeInfo = {
// Support effective id, to make it easier to use from XForms (i.e. no need to call XFormsUtils.getStaticIdFromId every time)
val staticId = XFormsUtils.getStaticIdFromId(containerId)
byId(fbFormInstance, staticId) filter IsContainer head
}
    def controlsInContainer(containerId: String): Int = (containerById(containerId) \\ "*:td" \ *).length
// Find all siblings of the given element with the given name, excepting the given element
def findSiblingsWithName(element: NodeInfo, siblingName: String) =
element parent * child * filter
(name(_) == siblingName) filterNot
(_ == element)
// Return all the container controls in the view
def getAllContainerControlsWithIds(inDoc: NodeInfo) = getAllControlsWithIds(inDoc) filter IsContainer
def getAllContainerControls(inDoc: NodeInfo) = findFRBodyElement(inDoc) descendant * filter IsContainer
// Various counts
def countSections(inDoc: NodeInfo) = getAllControlsWithIds(inDoc) count IsSection
def countAllGrids(inDoc: NodeInfo) = findFRBodyElement(inDoc) descendant * count IsGrid
def countRepeats(inDoc: NodeInfo) = getAllControlsWithIds(inDoc) count IsRepeat // includes repeated grids
def countSectionTemplates(inDoc: NodeInfo) = findFRBodyElement(inDoc) descendant * count IsSectionTemplateContent // non-repeated grids
def countGrids(inDoc: NodeInfo) = countAllGrids(inDoc) - countRepeats(inDoc)
def countAllNonContainers(inDoc: NodeInfo) = getAllControlsWithIds(inDoc) filterNot IsContainer size
def countAllContainers(inDoc: NodeInfo) = getAllContainerControls(inDoc).size
def countAllControls(inDoc: NodeInfo) = countAllContainers(inDoc) + countAllNonContainers(inDoc) + countSectionTemplates(inDoc)
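    // Rough illustration of the counts above (hypothetical form, not from this file):
    // a body with two non-repeated sections, each holding one grid of three input
    // controls, gives countAllContainers = 4 (2 sections + 2 grids),
    // countAllNonContainers = 6 and countSectionTemplates = 0, so countAllControls = 10.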
// Delete the entire container and contained controls
def deleteContainerById(canDelete: NodeInfo ⇒ Boolean, containerId: String): Unit = {
val container = containerById(containerId)
if (canDelete(container))
deleteContainer(container)
}
def deleteContainer(container: NodeInfo) = {
// Find the new td to select if we are removing the currently selected td
        val newTdToSelect = findNewTdToSelect(container, container \\ "*:td")
def recurse(container: NodeInfo): Seq[NodeInfo] = {
// NOTE: Deleting is tricky because NodeInfo is mutation-averse as it keeps a node's index, among others.
// So deleting a node under a given NodeInfo can cause the position of following siblings to be out of date
// and cause errors. So we delete from back to front. But a safer solution should be found.
// Go depth-first so we delete containers after all their content has been deleted
// NOTE: Use toList to make sure we are not lazy, otherwise items might be deleted as we go!
val children = childrenContainers(container).reverse.toList flatMap recurse
val gridContent =
if (IsGrid(container))
                    container \\ "*:tr" \\ "*:td" \ * filter IsControl reverse
else
Seq()
children ++ gridContent :+ container
}
// Start with top-level container
val controls = recurse(container)
// Delete all controls in order
controls flatMap controlElementsToDelete foreach (delete(_))
// Adjust selected td if needed
newTdToSelect foreach selectTd
}
// Move a container based on a move function (typically up or down)
def moveContainer(container: NodeInfo, otherContainer: NodeInfo, move: (NodeInfo, NodeInfo) ⇒ NodeInfo) {
// Get names before moving the container
val nameOption = getControlNameOption(container)
val otherNameOption = getControlNameOption(otherContainer)
val doc = container.getDocumentRoot
// Move container control itself
move(container, otherContainer)
// Try to move based on name of other element
(nameOption, otherNameOption) match {
case (Some(name), Some(otherName)) ⇒
// Move data holder only
for {
holder ← findDataHolders(doc, name)
otherHolder ← findDataHolders(doc, otherName)
} yield
move(holder, otherHolder)
// Move bind
for {
bind ← findBindByName(doc, name)
otherBind ← findBindByName(doc, otherName)
} yield
move(bind, otherBind)
// Try to move resource and template elements to a good place
// TODO: We move the container resource holder, but we should also move together the contained controls' resource holders
def firstControl(s: Seq[NodeInfo]) =
s find (getControlNameOption(_).isDefined)
def tryToMoveHolders(siblingName: String, moveOp: (NodeInfo, NodeInfo) ⇒ NodeInfo) =
findResourceAndTemplateHolders(doc, name) foreach {
holder ⇒
findSiblingsWithName(holder, siblingName).headOption foreach
(moveOp(holder, _))
}
                val movedContainer = byId(doc, container \@ "id").get // must get new reference
(firstControl(movedContainer preceding *), firstControl(movedContainer following *)) match {
case (Some(preceding), _) ⇒ tryToMoveHolders(getControlName(preceding), moveElementAfter)
case (_, Some(following)) ⇒ tryToMoveHolders(getControlName(following), moveElementBefore)
case _ ⇒
}
case _ ⇒
}
}
// Whether it is possible to move an item into the given container
// Currently: must be a section without section template content
// Later: fr:tab (maybe fr:tabview), wizard
def canMoveInto(container: NodeInfo) =
        IsSection(container) && ! (container \ * exists IsSectionTemplateContent)
def sectionsWithTemplates(inDoc: NodeInfo) =
        findFRBodyElement(inDoc) descendant * filter IsSection filter (_ \ * exists IsSectionTemplateContent)
// See: https://github.com/orbeon/orbeon-forms/issues/633
def deleteSectionTemplateContentHolders(inDoc: NodeInfo) = {
// Find data holders for all section templates
val holders =
for {
section ← sectionsWithTemplates(inDoc)
controlName ← getControlNameOption(section).toList
holder ← findDataHolders(inDoc, controlName)
} yield
holder
// Delete all elements underneath those holders
holders foreach { holder ⇒
            delete(holder \ *)
}
}
}
|
evlist/orbeon-forms
|
src/main/scala/org/orbeon/oxf/fb/ContainerOps.scala
|
Scala
|
lgpl-2.1
| 8,057 |
package scribe.file
import scribe.LogRecord
import scribe.output.LogOutput
import scribe.output.format.OutputFormat
import scribe.util.Time
import scribe.writer.Writer
import java.io.File
import java.nio.charset.Charset
import scala.concurrent.ExecutionContext
case class FileWriter(pathBuilder: PathBuilder = PathBuilder.Default,
append: Boolean = true,
flushMode: FlushMode = FlushMode.AsynchronousFlush()(scribe.Execution.global),
charset: Charset = Charset.defaultCharset()) extends Writer {
private var previousFile: Option[File] = None
private var _file: File = resolveFile()
def file: File = _file
def list(): List[File] = pathBuilder.iterator().toList.sortBy(_.lastModified())
def resolveFile(): File = pathBuilder.file(Time())
def updatePath(): Unit = {
val newFile = resolveFile()
_file = newFile
}
override def write[M](record: LogRecord[M], output: LogOutput, outputFormat: OutputFormat): Unit = synchronized {
pathBuilder.before(this)
// Write to LogFile
val logFile = LogFile(this)
if (!previousFile.contains(_file) || logFile.size == 0L) {
previousFile = Some(_file)
if (_file.length() == 0L || !append) {
outputFormat.init(logFile.write)
}
}
outputFormat.begin(logFile.write)
outputFormat(output, logFile.write)
outputFormat.end(logFile.write)
logFile.write(System.lineSeparator())
pathBuilder.after(this)
}
def flush(): Unit = LogFile(this).flush()
def flushNever: FileWriter = copy(flushMode = FlushMode.NeverFlush)
def flushAlways: FileWriter = copy(flushMode = FlushMode.AlwaysFlush)
def flushAsync(implicit ec: ExecutionContext): FileWriter = copy(flushMode = FlushMode.AsynchronousFlush())
}
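// A minimal wiring sketch (assumes scribe 3.x's Logger API, which is not shown in
// this file):
//   scribe.Logger.root
//     .clearHandlers()
//     .withHandler(writer = FileWriter(append = false).flushAlways)
//     .replace()
// after which log records are written to the file resolved by PathBuilder.Default.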
|
outr/scribe
|
fileModule/shared/src/main/scala/scribe/file/FileWriter.scala
|
Scala
|
mit
| 1,791 |
package org.openapitools.client.model
case class StringParameterValue (
_class: Option[String],
_name: Option[String],
_value: Option[String]
)
object StringParameterValue {
def toStringBody(var_class: Object, var_name: Object, var_value: Object) =
s"""
| {
| "class":$var_class,"name":$var_name,"value":$var_value
| }
""".stripMargin
}
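// Example of the body this helper produces (hypothetical, pre-quoted arguments):
//   toStringBody("\"hudson.model.StringParameterValue\"", "\"branch\"", "\"main\"")
// interpolates the arguments verbatim, yielding roughly
//   { "class":"hudson.model.StringParameterValue","name":"branch","value":"main" }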
|
cliffano/swaggy-jenkins
|
clients/scala-gatling/generated/src/gatling/scala/org/openapitools/client/model/StringParameterValue.scala
|
Scala
|
mit
| 396 |
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Outworkers Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.builder.serializers
import com.websudos.phantom.PhantomBaseSuite
import com.websudos.phantom.dsl._
import com.websudos.phantom.tables.TestDatabase
import com.websudos.util.testing._
import scala.concurrent.duration._
import org.scalatest.FreeSpec
class UpdateQuerySerializationTest extends FreeSpec with PhantomBaseSuite with TestDatabase.connector.Connector {
val comparisonValue = 5
"An Update query should" - {
"allow specifying consistency levels" - {
"specify a consistency level of ALL in an AssignmentsQuery" in {
val url = gen[String]
val query = TestDatabase.recipes.update()
.where(_.url eqs url)
.modify(_.servings setTo Some(comparisonValue))
.consistencyLevel_=(ConsistencyLevel.ALL)
.queryString
if (session.v3orNewer) {
query shouldEqual s"UPDATE phantom.recipes SET servings = 5 WHERE url = '$url';"
} else {
query shouldEqual s"UPDATE phantom.recipes USING CONSISTENCY ALL SET servings = 5 WHERE url = '$url';"
}
}
"chain a ttl clause to an UpdateQuery" in {
val url = gen[String]
val uid = gen[UUID]
val query = TestDatabase.recipes.update.where(_.url eqs url)
.modify(_.uid setTo uid)
.ttl(5.seconds)
.queryString
query shouldEqual s"UPDATE phantom.recipes USING TTL 5 SET uid = $uid WHERE url = '$url';"
}
"specify a consistency level in a ConditionUpdateQuery" in {
val url = gen[String]
val query = TestDatabase.recipes.update()
.where(_.url eqs url)
.modify(_.servings setTo Some(comparisonValue))
.onlyIf(_.description is Some("test"))
.consistencyLevel_=(ConsistencyLevel.ALL)
.queryString
if (session.v3orNewer) {
query shouldEqual s"UPDATE phantom.recipes SET servings = 5 WHERE url = '$url' IF description = 'test';"
} else {
query shouldEqual s"UPDATE phantom.recipes USING CONSISTENCY ALL SET servings = 5 WHERE url = '$url' IF description = 'test';"
}
}
"specify a non equals clause inside an ConditionUpdateQuery" in {
val url = gen[String]
val query = TestDatabase.recipes.update()
.where(_.url eqs url)
.modify(_.servings setTo Some(comparisonValue))
.onlyIf(_.description isNot Some("test"))
.queryString
query shouldEqual s"UPDATE phantom.recipes SET servings = 5 WHERE url = '$url' IF description != 'test';"
}
"specify a gt clause inside an ConditionUpdateQuery" in {
val url = gen[String]
val query = TestDatabase.recipes.update()
.where(_.url eqs url)
.modify(_.servings setTo Some(comparisonValue))
.onlyIf(_.description isGt Some("test"))
.queryString
query shouldEqual s"UPDATE phantom.recipes SET servings = 5 WHERE url = '$url' IF description > 'test';"
}
"specify a gte clause inside an ConditionalUpdateQuery" in {
val url = gen[String]
val query = TestDatabase.recipes.update()
.where(_.url eqs url)
.modify(_.servings setTo Some(comparisonValue))
.onlyIf(_.description isGte Some("test"))
.queryString
query shouldEqual s"UPDATE phantom.recipes SET servings = 5 WHERE url = '$url' IF description >= 'test';"
}
"specify a lt clause inside an ConditionalUpdateQuery" in {
val url = gen[String]
val query = TestDatabase.recipes.update()
.where(_.url eqs url)
.modify(_.servings setTo Some(comparisonValue))
.onlyIf(_.description isLt Some("test"))
.queryString
query shouldEqual s"UPDATE phantom.recipes SET servings = 5 WHERE url = '$url' IF description < 'test';"
}
"specify a lte clause inside an ConditionalUpdateQuery" in {
val url = gen[String]
val query = TestDatabase.recipes.update()
.where(_.url eqs url)
.modify(_.servings setTo Some(comparisonValue))
.onlyIf(_.description isLte Some("test"))
.queryString
query shouldEqual s"UPDATE phantom.recipes SET servings = 5 WHERE url = '$url' IF description <= 'test';"
}
"update a single entry inside a map column using a string apply" in {
val url = gen[String]
val query = TestDatabase.recipes.update
.where(_.url eqs url)
.modify(_.props("test") setTo "test2")
.queryString
query shouldEqual s"UPDATE phantom.recipes SET props['test'] = 'test2' WHERE url = '$url';"
}
"update a single entry inside a map column using an int column" in {
val id = gen[UUID]
val dt = new DateTime
val query = TestDatabase.events.update
.where(_.id eqs id)
.modify(_.map(5L) setTo dt)
.queryString
query shouldEqual s"UPDATE phantom.events SET map[5] = ${DateTimeIsPrimitive.asCql(dt)} WHERE id = $id;"
}
}
}
}
|
levinson/phantom
|
phantom-dsl/src/test/scala/com/websudos/phantom/builder/serializers/UpdateQuerySerializationTest.scala
|
Scala
|
bsd-2-clause
| 6,599 |
package dotty.tools
package dotc
package typer
import core._
import ast._
import Contexts._
import Types._
import Flags._
import Denotations._
import Names._
import StdNames._
import NameOps._
import Symbols._
import Trees._
import ProtoTypes._
import Constants._
import Scopes._
import annotation.unchecked
import util.Positions._
import util.{Stats, SimpleMap}
import util.common._
import transform.SymUtils._
import Decorators._
import Uniques._
import ErrorReporting.{err, errorType, DiagnosticString}
import config.Printers._
import collection.mutable
import SymDenotations.NoCompleter
object Checking {
import tpd._
/** A general checkBounds method that can be used for TypeApply nodes as
* well as for AppliedTypeTree nodes.
*/
def checkBounds(args: List[tpd.Tree], boundss: List[TypeBounds], instantiate: (Type, List[Type]) => Type)(implicit ctx: Context) =
for ((arg, which, bound) <- ctx.boundsViolations(args, boundss, instantiate))
ctx.error(
d"Type argument ${arg.tpe} does not conform to $which bound $bound ${err.whyNoMatchStr(arg.tpe, bound)}",
arg.pos)
/** Check that `tp` refers to a nonAbstract class
* and that the instance conforms to the self type of the created class.
*/
def checkInstantiable(tp: Type, pos: Position)(implicit ctx: Context): Unit =
tp.underlyingClassRef(refinementOK = false) match {
case tref: TypeRef =>
val cls = tref.symbol
if (cls.is(AbstractOrTrait))
ctx.error(d"$cls is abstract; cannot be instantiated", pos)
if (!cls.is(Module)) {
// Create a synthetic singleton type instance, and check whether
// it conforms to the self type of the class as seen from that instance.
val stp = SkolemType(tp)
val selfType = tref.givenSelfType.asSeenFrom(stp, cls)
if (selfType.exists && !(stp <:< selfType))
ctx.error(d"$tp does not conform to its self type $selfType; cannot be instantiated")
}
case _ =>
}
/** A type map which checks that the only cycles in a type are F-bounds
* and that protects all F-bounded references by LazyRefs.
*/
class CheckNonCyclicMap(sym: Symbol, reportErrors: Boolean)(implicit ctx: Context) extends TypeMap {
/** Are cycles allowed within nested refinedInfos of currently checked type? */
private var nestedCycleOK = false
/** Are cycles allowed within currently checked type? */
private var cycleOK = false
/** A diagnostic output string that indicates the position of the last
* part of a type bounds checked by checkInfo. Possible choices:
* alias, lower bound, upper bound.
*/
var where: String = ""
/** The last type top-level type checked when a CyclicReference occurs. */
var lastChecked: Type = NoType
/** Check info `tp` for cycles. Throw CyclicReference for illegal cycles,
* break direct cycle with a LazyRef for legal, F-bounded cycles.
*/
def checkInfo(tp: Type): Type = tp match {
case tp @ TypeAlias(alias) =>
try tp.derivedTypeAlias(apply(alias))
finally {
where = "alias"
lastChecked = alias
}
case tp @ TypeBounds(lo, hi) =>
val lo1 = try apply(lo) finally {
where = "lower bound"
lastChecked = lo
}
val saved = nestedCycleOK
nestedCycleOK = true
try tp.derivedTypeBounds(lo1, apply(hi))
finally {
nestedCycleOK = saved
where = "upper bound"
lastChecked = hi
}
case _ =>
tp
}
def apply(tp: Type) = tp match {
case tp: TermRef =>
this(tp.info)
mapOver(tp)
case tp @ RefinedType(parent, name) =>
val parent1 = this(parent)
val saved = cycleOK
cycleOK = nestedCycleOK
try tp.derivedRefinedType(parent1, name, this(tp.refinedInfo))
finally cycleOK = saved
case tp @ TypeRef(pre, name) =>
try {
// A prefix is interesting if it might contain (transitively) a reference
// to symbol `sym` itself. We only check references with interesting
// prefixes for cycles. This pruning is done in order not to force
// global symbols when doing the cyclicity check.
def isInteresting(prefix: Type): Boolean = prefix.stripTypeVar match {
case NoPrefix => true
case prefix: ThisType => sym.owner.isClass && prefix.cls.isContainedIn(sym.owner)
case prefix: NamedType => !prefix.symbol.isStaticOwner && isInteresting(prefix.prefix)
case SuperType(thistp, _) => isInteresting(thistp)
case AndType(tp1, tp2) => isInteresting(tp1) || isInteresting(tp2)
case OrType(tp1, tp2) => isInteresting(tp1) && isInteresting(tp2)
case _: RefinedType => false
// Note: it's important not to visit parents of RefinedTypes,
// since otherwise spurious #Apply projections might be inserted.
case _ => false
}
// If prefix is interesting, check info of typeref recursively, marking the referred symbol
// with NoCompleter. This provokes a CyclicReference when the symbol
// is hit again. Without this precaution we could stackoverflow here.
if (isInteresting(pre)) {
val info = tp.info
val symInfo = tp.symbol.info
if (tp.symbol.exists) tp.symbol.info = SymDenotations.NoCompleter
try checkInfo(info)
finally if (tp.symbol.exists) tp.symbol.info = symInfo
}
tp
} catch {
case ex: CyclicReference =>
ctx.debuglog(i"cycle detected for $tp, $nestedCycleOK, $cycleOK")
if (cycleOK) LazyRef(() => tp)
else if (reportErrors) throw ex
else tp
}
case _ => mapOver(tp)
}
}
/** Check that `info` of symbol `sym` is not cyclic.
* @pre sym is not yet initialized (i.e. its type is a Completer).
   *  @return `info` where every legal F-bounded reference is protected
* by a `LazyRef`, or `ErrorType` if a cycle was detected and reported.
* Furthermore: Add an #Apply to a fully instantiated type lambda, if none was
* given before. This is necessary here because sometimes type lambdas are not
* recognized when they are first formed.
*/
def checkNonCyclic(sym: Symbol, info: Type, reportErrors: Boolean)(implicit ctx: Context): Type = {
val checker = new CheckNonCyclicMap(sym, reportErrors)(ctx.addMode(Mode.CheckCyclic))
try checker.checkInfo(info)
catch {
case ex: CyclicReference =>
if (reportErrors) {
ctx.error(i"illegal cyclic reference: ${checker.where} ${checker.lastChecked} of $sym refers back to the type itself", sym.pos)
ErrorType
}
else info
}
}
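  // Example of the distinction drawn above (illustrative, not from this file):
  // an F-bounded definition such as `type T <: Ordered[T]` is accepted, with the
  // self-reference protected by a LazyRef, whereas a direct alias cycle such as
  // `type T = List[T]` is reported as an illegal cyclic reference.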
/** Check that refinement satisfies the following two conditions
* 1. No part of it refers to a symbol that's defined in the same refinement
* at a textually later point.
* 2. All references to the refinement itself via `this` are followed by
* selections.
* Note: It's not yet clear what exactly we want to allow and what we want to rule out.
* This depends also on firming up the DOT calculus. For the moment we only issue
* deprecated warnings, not errors.
*/
def checkRefinementNonCyclic(refinement: Tree, refineCls: ClassSymbol, seen: mutable.Set[Symbol])
(implicit ctx: Context): Unit = {
def flag(what: String, tree: Tree) =
ctx.deprecationWarning(i"$what reference in refinement is deprecated", tree.pos)
def forwardRef(tree: Tree) = flag("forward", tree)
def selfRef(tree: Tree) = flag("self", tree)
val checkTree = new TreeAccumulator[Unit] {
def checkRef(tree: Tree, sym: Symbol) =
if (sym.maybeOwner == refineCls && !seen(sym)) forwardRef(tree)
def apply(x: Unit, tree: Tree)(implicit ctx: Context) = tree match {
case tree: MemberDef =>
foldOver(x, tree)
seen += tree.symbol
case tree @ Select(This(_), _) =>
checkRef(tree, tree.symbol)
case tree: RefTree =>
checkRef(tree, tree.symbol)
foldOver(x, tree)
case tree: This =>
selfRef(tree)
case tree: TypeTree =>
val checkType = new TypeAccumulator[Unit] {
def apply(x: Unit, tp: Type): Unit = tp match {
case tp: NamedType =>
checkRef(tree, tp.symbol)
tp.prefix match {
case pre: ThisType =>
case pre => foldOver(x, pre)
}
case tp: ThisType if tp.cls == refineCls =>
selfRef(tree)
case _ =>
foldOver(x, tp)
}
}
checkType((), tree.tpe)
case _ =>
foldOver(x, tree)
}
}
checkTree((), refinement)
}
}
trait Checking {
import tpd._
def checkNonCyclic(sym: Symbol, info: TypeBounds, reportErrors: Boolean)(implicit ctx: Context): Type =
Checking.checkNonCyclic(sym, info, reportErrors)
/** Check that Java statics and packages can only be used in selections.
*/
def checkValue(tree: Tree, proto: Type)(implicit ctx: Context): tree.type = {
if (!proto.isInstanceOf[SelectionProto]) {
val sym = tree.tpe.termSymbol
// The check is avoided inside Java compilation units because it always fails
// on the singleton type Module.type.
if ((sym is Package) || ((sym is JavaModule) && !ctx.compilationUnit.isJava)) ctx.error(d"$sym is not a value", tree.pos)
}
tree
}
/** Check that type arguments `args` conform to corresponding bounds in `poly`
* Note: This does not check the bounds of AppliedTypeTrees. These
* are handled by method checkBounds in FirstTransform
*/
def checkBounds(args: List[tpd.Tree], poly: PolyType)(implicit ctx: Context): Unit =
Checking.checkBounds(args, poly.paramBounds, _.substParams(poly, _))
/** Check that type `tp` is stable. */
def checkStable(tp: Type, pos: Position)(implicit ctx: Context): Unit =
if (!tp.isStable && !tp.isErroneous)
ctx.error(d"$tp is not stable", pos)
/** Check that `tp` is a class type with a stable prefix. Also, if `traitReq` is
* true check that `tp` is a trait.
* Stability checking is disabled in phases after RefChecks.
* @return `tp` itself if it is a class or trait ref, ObjectClass.typeRef if not.
*/
def checkClassTypeWithStablePrefix(tp: Type, pos: Position, traitReq: Boolean)(implicit ctx: Context): Type =
tp.underlyingClassRef(refinementOK = false) match {
case tref: TypeRef =>
if (ctx.phase <= ctx.refchecksPhase) checkStable(tref.prefix, pos)
if (traitReq && !(tref.symbol is Trait)) ctx.error(d"$tref is not a trait", pos)
tp
case _ =>
ctx.error(d"$tp is not a class type", pos)
defn.ObjectClass.typeRef
}
/** Check that a non-implicit parameter making up the first parameter section of an
* implicit conversion is not a singleton type.
*/
def checkImplicitParamsNotSingletons(vparamss: List[List[ValDef]])(implicit ctx: Context): Unit = vparamss match {
case (vparam :: Nil) :: _ if !(vparam.symbol is Implicit) =>
if (vparam.tpt.tpe.isInstanceOf[SingletonType])
ctx.error(s"implicit conversion may not have a parameter of singleton type", vparam.tpt.pos)
case _ =>
}
/** Check that any top-level type arguments in this type are feasible, i.e. that
   *  their lower bound conforms to their upper bound. If a type argument is
   *  infeasible, issue an error and continue with the upper bound.
*/
def checkFeasible(tp: Type, pos: Position, where: => String = "")(implicit ctx: Context): Type = tp match {
case tp: RefinedType =>
tp.derivedRefinedType(tp.parent, tp.refinedName, checkFeasible(tp.refinedInfo, pos, where))
case tp @ TypeBounds(lo, hi) if !(lo <:< hi) =>
ctx.error(d"no type exists between low bound $lo and high bound $hi$where", pos)
TypeAlias(hi)
case _ =>
tp
}
/** Check that class does not define same symbol twice */
def checkNoDoubleDefs(cls: Symbol)(implicit ctx: Context): Unit = {
val seen = new mutable.HashMap[Name, List[Symbol]] {
override def default(key: Name) = Nil
}
typr.println(i"check no double defs $cls")
def checkDecl(decl: Symbol): Unit = {
for (other <- seen(decl.name)) {
typr.println(i"conflict? $decl $other")
if (decl.matches(other)) {
def doubleDefError(decl: Symbol, other: Symbol): Unit = {
def ofType = if (decl.isType) "" else d": ${other.info}"
def explanation =
if (!decl.isRealMethod) ""
            else "\n (the definitions have matching type signatures)"
ctx.error(d"$decl is already defined as $other$ofType$explanation", decl.pos)
}
if (decl is Synthetic) doubleDefError(other, decl)
else doubleDefError(decl, other)
}
if ((decl is HasDefaultParams) && (other is HasDefaultParams)) {
ctx.error(d"two or more overloaded variants of $decl have default arguments")
decl resetFlag HasDefaultParams
}
}
seen(decl.name) = decl :: seen(decl.name)
}
cls.info.decls.foreach(checkDecl)
cls.info match {
case ClassInfo(_, _, _, _, selfSym: Symbol) => checkDecl(selfSym)
case _ =>
}
}
def checkParentCall(call: Tree, caller: ClassSymbol)(implicit ctx: Context) =
if (!ctx.isAfterTyper) {
val called = call.tpe.classSymbol
if (caller is Trait)
ctx.error(i"$caller may not call constructor of $called", call.pos)
else if (called.is(Trait) && !caller.mixins.contains(called))
ctx.error(i"""$called is already implemented by super${caller.superClass},
|its constructor cannot be called again""".stripMargin, call.pos)
}
}
trait NoChecking extends Checking {
import tpd._
override def checkNonCyclic(sym: Symbol, info: TypeBounds, reportErrors: Boolean)(implicit ctx: Context): Type = info
override def checkValue(tree: Tree, proto: Type)(implicit ctx: Context): tree.type = tree
override def checkBounds(args: List[tpd.Tree], poly: PolyType)(implicit ctx: Context): Unit = ()
override def checkStable(tp: Type, pos: Position)(implicit ctx: Context): Unit = ()
override def checkClassTypeWithStablePrefix(tp: Type, pos: Position, traitReq: Boolean)(implicit ctx: Context): Type = tp
override def checkImplicitParamsNotSingletons(vparamss: List[List[ValDef]])(implicit ctx: Context): Unit = ()
override def checkFeasible(tp: Type, pos: Position, where: => String = "")(implicit ctx: Context): Type = tp
override def checkNoDoubleDefs(cls: Symbol)(implicit ctx: Context): Unit = ()
override def checkParentCall(call: Tree, caller: ClassSymbol)(implicit ctx: Context) = ()
}
|
VladimirNik/dotty
|
src/dotty/tools/dotc/typer/Checking.scala
|
Scala
|
bsd-3-clause
| 15,123 |
package vaadin.scala
import vaadin.scala.internal.UploadReceiver
import vaadin.scala.internal.UploadProgressListener
import vaadin.scala.internal.UploadStartedListener
import vaadin.scala.internal.UploadFinishedListener
import vaadin.scala.internal.UploadFailedListener
import vaadin.scala.internal.UploadSucceededListener
import vaadin.scala.internal.ListenersTrait
import vaadin.scala.mixins.UploadMixin
package mixins {
trait UploadMixin extends AbstractComponentMixin
}
object Upload {
case class ReceiveEvent(filename: String, mimeType: String)
case class ProgressEvent(readBytes: Long, contentLength: Long)
case class StartedEvent(upload: Upload, filename: String, mimeType: String, contentLength: Long)
case class FinishedEvent(upload: Upload, filename: String, mimeType: String, contentLength: Long)
case class FailedEvent(upload: Upload, filename: String, mimeType: String, contentLength: Long, reason: Exception)
case class SucceededEvent(upload: Upload, filename: String, mimeType: String, contentLength: Long)
}
/**
* @see com.vaadin.ui.Upload
* @author Henri Kerola / Vaadin
*/
class Upload(override val p: com.vaadin.ui.Upload with UploadMixin = new com.vaadin.ui.Upload with UploadMixin) extends AbstractComponent(p) with Focusable {
def receiver: Option[Upload.ReceiveEvent => java.io.OutputStream] = p.getReceiver match {
case null => None
case receiver: UploadReceiver => Some(receiver.receiver)
}
def receiver_=(receiver: Upload.ReceiveEvent => java.io.OutputStream): Unit = {
p.setReceiver(new UploadReceiver(receiver))
}
def receiver_=(receiverOption: Option[Upload.ReceiveEvent => java.io.OutputStream]): Unit = receiverOption match {
case None => p.setReceiver(null)
case Some(r) => receiver = r
}
def interruptUpload() = p.interruptUpload()
def uploading = p.isUploading
def bytesRead: Long = p.getBytesRead
def uploadSize: Long = p.getUploadSize
def buttonCaption = Option(p.getButtonCaption)
def buttonCaption_=(buttonCaption: Option[String]) = p.setButtonCaption(buttonCaption.orNull)
def buttonCaption_=(buttonCaption: String) = p.setButtonCaption(buttonCaption)
def submitUpload() = p.submitUpload()
lazy val progressListeners = new ListenersTrait[Upload.ProgressEvent, UploadProgressListener] {
override def listeners = p.getListeners(classOf[com.vaadin.terminal.StreamVariable.StreamingProgressEvent])
override def addListener(elem: Upload.ProgressEvent => Unit) = p.addListener(new UploadProgressListener(elem))
override def removeListener(elem: UploadProgressListener) = p.removeListener(elem)
}
lazy val startedListeners = new ListenersTrait[Upload.StartedEvent, UploadStartedListener] {
override def listeners = p.getListeners(classOf[com.vaadin.ui.Upload.StartedEvent])
override def addListener(elem: Upload.StartedEvent => Unit) = p.addListener(new UploadStartedListener(elem))
override def removeListener(elem: UploadStartedListener) = p.removeListener(elem)
}
lazy val finishedListeners = new ListenersTrait[Upload.FinishedEvent, UploadFinishedListener] {
override def listeners = p.getListeners(classOf[com.vaadin.ui.Upload.FinishedEvent])
override def addListener(elem: Upload.FinishedEvent => Unit) = p.addListener(new UploadFinishedListener(elem))
override def removeListener(elem: UploadFinishedListener) = p.removeListener(elem)
}
lazy val failedListeners = new ListenersTrait[Upload.FailedEvent, UploadFailedListener] {
override def listeners = p.getListeners(classOf[com.vaadin.ui.Upload.FailedEvent])
override def addListener(elem: Upload.FailedEvent => Unit) = p.addListener(new UploadFailedListener(elem))
override def removeListener(elem: UploadFailedListener) = p.removeListener(elem)
}
lazy val succeededListeners = new ListenersTrait[Upload.SucceededEvent, UploadSucceededListener] {
override def listeners = p.getListeners(classOf[com.vaadin.ui.Upload.SucceededEvent])
override def addListener(elem: Upload.SucceededEvent => Unit) = p.addListener(new UploadSucceededListener(elem))
override def removeListener(elem: UploadSucceededListener) = p.removeListener(elem)
}
}
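// A hedged usage sketch built only from the members defined above (values are
// hypothetical):
//   val upload = new Upload
//   upload.receiver = (e: Upload.ReceiveEvent) => new java.io.ByteArrayOutputStream()
//   upload.succeededListeners.addListener(e => println(e.filename + " uploaded"))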
|
CloudInABox/scalavaadinutils
|
src/main/scala/vaadin/scala/Upload.scala
|
Scala
|
mit
| 4,188 |
/*
* Copyright (c) 2015 Daniel Higuero.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.spark.examples.streaming
import java.util.Calendar
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext}
/**
 * Write the solution to the proposed exercises here.
*/
object ejercicio4 {
/**
* Field separator.
*/
val Separator = ";";
/**
* Threshold that determines when a number of failed auth entries is considered an attack.
*/
val ThresholdAuth = 1;
/**
* Threshold that determines when a number of failed web access entries is considered an attack.
*/
val ThresholdWeb = 1;
def main(args: Array[String]): Unit = {
//Suppress Spark output
Logger.getLogger("org").setLevel(Level.ERROR)
Logger.getLogger("akka").setLevel(Level.ERROR)
//Define the Spark configuration. In this case we are using the local mode
val sparkConf = new SparkConf().setMaster("local[4]").setAppName("ReadingLogs_exercise2")
//Define a SparkStreamingContext with a batch interval of 10 seconds
val ssc = new StreamingContext(sparkConf, Seconds(10))
    // This stream is not used directly. It is cached so that a Java error does not spoil the result.
val events = ssc.socketTextStream("localhost", 10002, StorageLevel.MEMORY_AND_DISK_SER)
//Filter out empty lines and print the count
val numberEventsRDD = events.map(x => {
val arr = x.split(';')
new WebEvent(arr(0), arr(1), arr(2), arr(3), arr(4))
}).foreachRDD(x=>x.cache())
    // Use the authorization stream. Print it to see the kinds of lines it carries.
// Given the previous stream obtained in Exercise 3, filter those hosts that are already blacklisted in the system.
// In order to do that, generate an RDD with the contents of a file that contains:
val autorizacion = ssc.socketTextStream("localhost", 10001, StorageLevel.MEMORY_AND_DISK_SER)
val numberEvents_authRDD = autorizacion.map(x => {
val arr = x.split(';')
new AuthEvent(arr(0), arr(1), arr(2), arr(3))
})
    // Since the previous exercise yielded hosts 3, 5, 6 and 9, put them into an RDD
val lista = ssc.sparkContext.parallelize(List("host3","host5","host6","host9"))
val atacantes = lista.map(x => (x,0))
    // Get the system date so it can be printed
val today = Calendar.getInstance().getTime()
    // First filter the entries that are attacks and sum them
val comunRDD =numberEvents_authRDD.filter(x => x.Message.contains("failed"))
.map(x => (x.Source, 1))
.reduceByKeyAndWindow((acum,nuevo)=>acum+nuevo,Seconds(10))
.transform(rdd => rdd.join(atacantes).filter(x => x._2._2 == 0).map(x => (x._1,x._2._1)))
.foreachRDD(rdd => {
println()
println(today)
        println("Total number of attacks by the servers in the list: " + rdd.reduce((x, y) => ("Total", x._2 + y._2))._2)
        println()
        println("Number of attacking servers: " + rdd.count())
rdd.reduceByKey((y,z)=>y+z).foreach(x=>println(x._1 + " : " + x._2))
println()
})
//Start the streaming context
ssc.start()
ssc.awaitTermination()
}
}
|
anazamarron/spark-streaming-exercises
|
src/main/scala/org/spark/examples/streaming/ejercicio4.scala
|
Scala
|
apache-2.0
| 3,799 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.mesos
import java.io.File
import java.util.{Collections, List => JList}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}
import java.util.concurrent.locks.ReentrantLock
import org.apache.mesos.Protos.{TaskInfo => MesosTaskInfo, _}
import org.apache.mesos.SchedulerDriver
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.concurrent.Future
import org.apache.spark.{SecurityManager, SparkContext, SparkException, TaskState}
import org.apache.spark.deploy.mesos.config._
import org.apache.spark.deploy.security.HadoopDelegationTokenManager
import org.apache.spark.internal.config
import org.apache.spark.network.netty.SparkTransportConf
import org.apache.spark.network.shuffle.mesos.MesosExternalShuffleClient
import org.apache.spark.rpc.RpcEndpointAddress
import org.apache.spark.scheduler.{SlaveLost, TaskSchedulerImpl}
import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend
import org.apache.spark.util.Utils
/**
* A SchedulerBackend that runs tasks on Mesos, but uses "coarse-grained" tasks, where it holds
* onto each Mesos node for the duration of the Spark job instead of relinquishing cores whenever
* a task is done. It launches Spark tasks within the coarse-grained Mesos tasks using the
* CoarseGrainedSchedulerBackend mechanism. This class is useful for lower and more predictable
* latency.
*
* Unfortunately this has a bit of duplication from [[MesosFineGrainedSchedulerBackend]],
* but it seems hard to remove this.
*/
private[spark] class MesosCoarseGrainedSchedulerBackend(
scheduler: TaskSchedulerImpl,
sc: SparkContext,
master: String,
securityManager: SecurityManager)
extends CoarseGrainedSchedulerBackend(scheduler, sc.env.rpcEnv)
with org.apache.mesos.Scheduler with MesosSchedulerUtils {
override def hadoopDelegationTokenManager: Option[HadoopDelegationTokenManager] =
Some(new HadoopDelegationTokenManager(sc.conf, sc.hadoopConfiguration))
// Blacklist a slave after this many failures
private val MAX_SLAVE_FAILURES = 2
private val maxCoresOption = conf.getOption("spark.cores.max").map(_.toInt)
private val executorCoresOption = conf.getOption("spark.executor.cores").map(_.toInt)
private val minCoresPerExecutor = executorCoresOption.getOrElse(1)
// Maximum number of cores to acquire
private val maxCores = {
val cores = maxCoresOption.getOrElse(Int.MaxValue)
// Set maxCores to a multiple of smallest executor we can launch
cores - (cores % minCoresPerExecutor)
}
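  // Worked example of the rounding above: with spark.cores.max=10 and
  // spark.executor.cores=4, maxCores becomes 10 - (10 % 4) = 8, i.e. two executors.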
private val useFetcherCache = conf.getBoolean("spark.mesos.fetcherCache.enable", false)
private val maxGpus = conf.getInt("spark.mesos.gpus.max", 0)
private val taskLabels = conf.get("spark.mesos.task.labels", "")
private[this] val shutdownTimeoutMS =
conf.getTimeAsMs("spark.mesos.coarse.shutdownTimeout", "10s")
.ensuring(_ >= 0, "spark.mesos.coarse.shutdownTimeout must be >= 0")
// Synchronization protected by stateLock
private[this] var stopCalled: Boolean = false
// If shuffle service is enabled, the Spark driver will register with the shuffle service.
// This is for cleaning up shuffle files reliably.
private val shuffleServiceEnabled = conf.getBoolean("spark.shuffle.service.enabled", false)
// Cores we have acquired with each Mesos task ID
private val coresByTaskId = new mutable.HashMap[String, Int]
private val gpusByTaskId = new mutable.HashMap[String, Int]
private var totalCoresAcquired = 0
private var totalGpusAcquired = 0
// SlaveID -> Slave
// This map accumulates entries for the duration of the job. Slaves are never deleted, because
// we need to maintain e.g. failure state and connection state.
private val slaves = new mutable.HashMap[String, Slave]
/**
* The total number of executors we aim to have. Undefined when not using dynamic allocation.
* Initially set to 0 when using dynamic allocation, the executor allocation manager will send
* the real initial limit later.
*/
private var executorLimitOption: Option[Int] = {
if (Utils.isDynamicAllocationEnabled(conf)) {
Some(0)
} else {
None
}
}
/**
* Return the current executor limit, which may be [[Int.MaxValue]]
* before properly initialized.
*/
private[mesos] def executorLimit: Int = executorLimitOption.getOrElse(Int.MaxValue)
// private lock object protecting mutable state above. Using the intrinsic lock
// may lead to deadlocks since the superclass might also try to lock
private val stateLock = new ReentrantLock
private val extraCoresPerExecutor = conf.getInt("spark.mesos.extra.cores", 0)
// Offer constraints
private val slaveOfferConstraints =
parseConstraintString(sc.conf.get("spark.mesos.constraints", ""))
// Reject offers with mismatched constraints in seconds
private val rejectOfferDurationForUnmetConstraints =
getRejectOfferDurationForUnmetConstraints(sc.conf)
// Reject offers when we reached the maximum number of cores for this framework
private val rejectOfferDurationForReachedMaxCores =
getRejectOfferDurationForReachedMaxCores(sc.conf)
// A client for talking to the external shuffle service
private val mesosExternalShuffleClient: Option[MesosExternalShuffleClient] = {
if (shuffleServiceEnabled) {
Some(getShuffleClient())
} else {
None
}
}
// This method is factored out for testability
protected def getShuffleClient(): MesosExternalShuffleClient = {
new MesosExternalShuffleClient(
SparkTransportConf.fromSparkConf(conf, "shuffle"),
securityManager,
securityManager.isAuthenticationEnabled(),
conf.get(config.SHUFFLE_REGISTRATION_TIMEOUT))
}
private var nextMesosTaskId = 0
@volatile var appId: String = _
private var schedulerDriver: SchedulerDriver = _
def newMesosTaskId(): String = {
val id = nextMesosTaskId
nextMesosTaskId += 1
id.toString
}
override def start() {
super.start()
val startedBefore = IdHelper.startedBefore.getAndSet(true)
val suffix = if (startedBefore) {
f"-${IdHelper.nextSCNumber.incrementAndGet()}%04d"
} else {
""
}
val driver = createSchedulerDriver(
master,
MesosCoarseGrainedSchedulerBackend.this,
sc.sparkUser,
sc.appName,
sc.conf,
sc.conf.getOption("spark.mesos.driver.webui.url").orElse(sc.ui.map(_.webUrl)),
None,
Some(sc.conf.get(DRIVER_FAILOVER_TIMEOUT)),
sc.conf.getOption("spark.mesos.driver.frameworkId").map(_ + suffix)
)
startScheduler(driver)
}
def createCommand(offer: Offer, numCores: Int, taskId: String): CommandInfo = {
val environment = Environment.newBuilder()
val extraClassPath = conf.getOption("spark.executor.extraClassPath")
extraClassPath.foreach { cp =>
environment.addVariables(
Environment.Variable.newBuilder().setName("SPARK_EXECUTOR_CLASSPATH").setValue(cp).build())
}
val extraJavaOpts = conf.get("spark.executor.extraJavaOptions", "")
// Set the environment variable through a command prefix
// to append to the existing value of the variable
val prefixEnv = conf.getOption("spark.executor.extraLibraryPath").map { p =>
Utils.libraryPathEnvPrefix(Seq(p))
}.getOrElse("")
environment.addVariables(
Environment.Variable.newBuilder()
.setName("SPARK_EXECUTOR_OPTS")
.setValue(extraJavaOpts)
.build())
sc.executorEnvs.foreach { case (key, value) =>
environment.addVariables(Environment.Variable.newBuilder()
.setName(key)
.setValue(value)
.build())
}
val command = CommandInfo.newBuilder()
.setEnvironment(environment)
val uri = conf.getOption("spark.executor.uri")
.orElse(Option(System.getenv("SPARK_EXECUTOR_URI")))
if (uri.isEmpty) {
val executorSparkHome = conf.getOption("spark.mesos.executor.home")
.orElse(sc.getSparkHome())
.getOrElse {
throw new SparkException("Executor Spark home `spark.mesos.executor.home` is not set!")
}
val runScript = new File(executorSparkHome, "./bin/spark-class").getPath
command.setValue(
"%s \"%s\" org.apache.spark.executor.CoarseGrainedExecutorBackend"
.format(prefixEnv, runScript) +
s" --driver-url $driverURL" +
s" --executor-id $taskId" +
s" --hostname ${executorHostname(offer)}" +
s" --cores $numCores" +
s" --app-id $appId")
} else {
// Grab everything to the first '.'. We'll use that and '*' to
// glob the directory "correctly".
val basename = uri.get.split('/').last.split('.').head
command.setValue(
s"cd $basename*; $prefixEnv " +
"./bin/spark-class org.apache.spark.executor.CoarseGrainedExecutorBackend" +
s" --driver-url $driverURL" +
s" --executor-id $taskId" +
s" --hostname ${executorHostname(offer)}" +
s" --cores $numCores" +
s" --app-id $appId")
command.addUris(CommandInfo.URI.newBuilder().setValue(uri.get).setCache(useFetcherCache))
}
conf.getOption("spark.mesos.uris").foreach(setupUris(_, command, useFetcherCache))
command.build()
}
protected def driverURL: String = {
if (conf.contains("spark.testing")) {
"driverURL"
} else {
RpcEndpointAddress(
conf.get("spark.driver.host"),
conf.get("spark.driver.port").toInt,
CoarseGrainedSchedulerBackend.ENDPOINT_NAME).toString
}
}
override def offerRescinded(d: org.apache.mesos.SchedulerDriver, o: OfferID) {}
override def registered(
driver: org.apache.mesos.SchedulerDriver,
frameworkId: FrameworkID,
masterInfo: MasterInfo) {
this.appId = frameworkId.getValue
this.mesosExternalShuffleClient.foreach(_.init(appId))
this.schedulerDriver = driver
markRegistered()
}
override def sufficientResourcesRegistered(): Boolean = {
totalCoreCount.get >= maxCoresOption.getOrElse(0) * minRegisteredRatio
}
override def disconnected(d: org.apache.mesos.SchedulerDriver) {}
override def reregistered(d: org.apache.mesos.SchedulerDriver, masterInfo: MasterInfo) {}
/**
* Method called by Mesos to offer resources on slaves. We respond by launching an executor,
* unless we've already launched more than we wanted to.
*/
override def resourceOffers(d: org.apache.mesos.SchedulerDriver, offers: JList[Offer]) {
stateLock.synchronized {
if (stopCalled) {
logDebug("Ignoring offers during shutdown")
// Driver should simply return a stopped status on race
// condition between this.stop() and completing here
offers.asScala.map(_.getId).foreach(d.declineOffer)
return
}
logDebug(s"Received ${offers.size} resource offers.")
val (matchedOffers, unmatchedOffers) = offers.asScala.partition { offer =>
val offerAttributes = toAttributeMap(offer.getAttributesList)
matchesAttributeRequirements(slaveOfferConstraints, offerAttributes)
}
declineUnmatchedOffers(d, unmatchedOffers)
handleMatchedOffers(d, matchedOffers)
}
}
private def declineUnmatchedOffers(
driver: org.apache.mesos.SchedulerDriver, offers: mutable.Buffer[Offer]): Unit = {
offers.foreach { offer =>
declineOffer(
driver,
offer,
Some("unmet constraints"),
Some(rejectOfferDurationForUnmetConstraints))
}
}
/**
* Launches executors on accepted offers, and declines unused offers. Executors are launched
* round-robin on offers.
*
* @param driver SchedulerDriver
* @param offers Mesos offers that match attribute constraints
*/
private def handleMatchedOffers(
driver: org.apache.mesos.SchedulerDriver, offers: mutable.Buffer[Offer]): Unit = {
val tasks = buildMesosTasks(offers)
for (offer <- offers) {
val offerAttributes = toAttributeMap(offer.getAttributesList)
val offerMem = getResource(offer.getResourcesList, "mem")
val offerCpus = getResource(offer.getResourcesList, "cpus")
val offerPorts = getRangeResource(offer.getResourcesList, "ports")
val id = offer.getId.getValue
if (tasks.contains(offer.getId)) { // accept
val offerTasks = tasks(offer.getId)
logDebug(s"Accepting offer: $id with attributes: $offerAttributes " +
s"mem: $offerMem cpu: $offerCpus ports: $offerPorts." +
s" Launching ${offerTasks.size} Mesos tasks.")
for (task <- offerTasks) {
val taskId = task.getTaskId
val mem = getResource(task.getResourcesList, "mem")
val cpus = getResource(task.getResourcesList, "cpus")
val ports = getRangeResource(task.getResourcesList, "ports").mkString(",")
logDebug(s"Launching Mesos task: ${taskId.getValue} with mem: $mem cpu: $cpus" +
s" ports: $ports")
}
driver.launchTasks(
Collections.singleton(offer.getId),
offerTasks.asJava)
} else if (totalCoresAcquired >= maxCores) {
// Reject an offer for a configurable amount of time to avoid starving other frameworks
declineOffer(driver,
offer,
Some("reached spark.cores.max"),
Some(rejectOfferDurationForReachedMaxCores))
} else {
declineOffer(
driver,
offer)
}
}
}
/**
* Returns a map from OfferIDs to the tasks to launch on those offers. In order to maximize
* per-task memory and IO, tasks are round-robin assigned to offers.
*
* @param offers Mesos offers that match attribute constraints
* @return A map from OfferID to a list of Mesos tasks to launch on that offer
*/
private def buildMesosTasks(offers: mutable.Buffer[Offer]): Map[OfferID, List[MesosTaskInfo]] = {
// offerID -> tasks
val tasks = new mutable.HashMap[OfferID, List[MesosTaskInfo]].withDefaultValue(Nil)
// offerID -> resources
val remainingResources = mutable.Map(offers.map(offer =>
(offer.getId.getValue, offer.getResourcesList)): _*)
var launchTasks = true
// TODO(mgummelt): combine offers for a single slave
//
// round-robin create executors on the available offers
while (launchTasks) {
launchTasks = false
for (offer <- offers) {
val slaveId = offer.getSlaveId.getValue
val offerId = offer.getId.getValue
val resources = remainingResources(offerId)
if (canLaunchTask(slaveId, resources)) {
// Create a task
launchTasks = true
val taskId = newMesosTaskId()
val offerCPUs = getResource(resources, "cpus").toInt
val taskGPUs = Math.min(
Math.max(0, maxGpus - totalGpusAcquired), getResource(resources, "gpus").toInt)
val taskCPUs = executorCores(offerCPUs)
val taskMemory = executorMemory(sc)
slaves.getOrElseUpdate(slaveId, new Slave(offer.getHostname)).taskIDs.add(taskId)
val (resourcesLeft, resourcesToUse) =
partitionTaskResources(resources, taskCPUs, taskMemory, taskGPUs)
val taskBuilder = MesosTaskInfo.newBuilder()
.setTaskId(TaskID.newBuilder().setValue(taskId.toString).build())
.setSlaveId(offer.getSlaveId)
.setCommand(createCommand(offer, taskCPUs + extraCoresPerExecutor, taskId))
.setName(s"${sc.appName} $taskId")
.setLabels(MesosProtoUtils.mesosLabels(taskLabels))
.addAllResources(resourcesToUse.asJava)
.setContainer(MesosSchedulerBackendUtil.containerInfo(sc.conf))
tasks(offer.getId) ::= taskBuilder.build()
remainingResources(offerId) = resourcesLeft.asJava
totalCoresAcquired += taskCPUs
coresByTaskId(taskId) = taskCPUs
if (taskGPUs > 0) {
totalGpusAcquired += taskGPUs
gpusByTaskId(taskId) = taskGPUs
}
}
}
}
tasks.toMap
}
/** Extracts task needed resources from a list of available resources. */
private def partitionTaskResources(
resources: JList[Resource],
taskCPUs: Int,
taskMemory: Int,
taskGPUs: Int)
: (List[Resource], List[Resource]) = {
// partition cpus & mem
val (afterCPUResources, cpuResourcesToUse) = partitionResources(resources, "cpus", taskCPUs)
val (afterMemResources, memResourcesToUse) =
partitionResources(afterCPUResources.asJava, "mem", taskMemory)
val (afterGPUResources, gpuResourcesToUse) =
partitionResources(afterMemResources.asJava, "gpus", taskGPUs)
// If user specifies port numbers in SparkConfig then consecutive tasks will not be launched
// on the same host. This essentially means one executor per host.
// TODO: handle network isolator case
val (nonPortResources, portResourcesToUse) =
partitionPortResources(nonZeroPortValuesFromConfig(sc.conf), afterGPUResources)
(nonPortResources,
cpuResourcesToUse ++ memResourcesToUse ++ portResourcesToUse ++ gpuResourcesToUse)
}
private def canLaunchTask(slaveId: String, resources: JList[Resource]): Boolean = {
val offerMem = getResource(resources, "mem")
val offerCPUs = getResource(resources, "cpus").toInt
val cpus = executorCores(offerCPUs)
val mem = executorMemory(sc)
val ports = getRangeResource(resources, "ports")
val meetsPortRequirements = checkPorts(sc.conf, ports)
cpus > 0 &&
cpus <= offerCPUs &&
cpus + totalCoresAcquired <= maxCores &&
mem <= offerMem &&
numExecutors() < executorLimit &&
slaves.get(slaveId).map(_.taskFailures).getOrElse(0) < MAX_SLAVE_FAILURES &&
meetsPortRequirements
}
private def executorCores(offerCPUs: Int): Int = {
executorCoresOption.getOrElse(
math.min(offerCPUs, maxCores - totalCoresAcquired)
)
}
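  // For example (illustrative; assumes maxCores is backed by spark.cores.max and
  // executorCoresOption by spark.executor.cores, which is not shown here): with
  // maxCores = 8, 5 cores already acquired and a 16-core offer, executorCores(16) == 3
  // unless spark.executor.cores caps it lower.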
override def statusUpdate(d: org.apache.mesos.SchedulerDriver, status: TaskStatus) {
val taskId = status.getTaskId.getValue
val slaveId = status.getSlaveId.getValue
val state = mesosToTaskState(status.getState)
logInfo(s"Mesos task $taskId is now ${status.getState}")
stateLock.synchronized {
val slave = slaves(slaveId)
// If the shuffle service is enabled, have the driver register with each one of the
// shuffle services. This allows the shuffle services to clean up state associated with
// this application when the driver exits. There is currently not a great way to detect
// this through Mesos, since the shuffle services are set up independently.
if (state.equals(TaskState.RUNNING) &&
shuffleServiceEnabled &&
!slave.shuffleRegistered) {
assume(mesosExternalShuffleClient.isDefined,
"External shuffle client was not instantiated even though shuffle service is enabled.")
// TODO: Remove this and allow the MesosExternalShuffleService to detect
// framework termination when new Mesos Framework HTTP API is available.
val externalShufflePort = conf.getInt("spark.shuffle.service.port", 7337)
logDebug(s"Connecting to shuffle service on slave $slaveId, " +
s"host ${slave.hostname}, port $externalShufflePort for app ${conf.getAppId}")
mesosExternalShuffleClient.get
.registerDriverWithShuffleService(
slave.hostname,
externalShufflePort,
sc.conf.getTimeAsMs("spark.storage.blockManagerSlaveTimeoutMs",
s"${sc.conf.getTimeAsMs("spark.network.timeout", "120s")}ms"),
sc.conf.getTimeAsMs("spark.executor.heartbeatInterval", "10s"))
slave.shuffleRegistered = true
}
if (TaskState.isFinished(state)) {
// Remove the cores we have remembered for this task, if it's in the hashmap
for (cores <- coresByTaskId.get(taskId)) {
totalCoresAcquired -= cores
coresByTaskId -= taskId
}
// Also remove the gpus we have remembered for this task, if it's in the hashmap
for (gpus <- gpusByTaskId.get(taskId)) {
totalGpusAcquired -= gpus
gpusByTaskId -= taskId
}
// If it was a failure, mark the slave as failed for blacklisting purposes
if (TaskState.isFailed(state)) {
slave.taskFailures += 1
if (slave.taskFailures >= MAX_SLAVE_FAILURES) {
logInfo(s"Blacklisting Mesos slave $slaveId due to too many failures; " +
"is Spark installed on it?")
}
}
executorTerminated(d, slaveId, taskId, s"Executor finished with state $state")
// In case we'd rejected everything before but have now lost a node
d.reviveOffers()
}
}
}
override def error(d: org.apache.mesos.SchedulerDriver, message: String) {
logError(s"Mesos error: $message")
scheduler.error(message)
}
override def stop() {
// Make sure we're not launching tasks during shutdown
stateLock.synchronized {
if (stopCalled) {
logWarning("Stop called multiple times, ignoring")
return
}
stopCalled = true
super.stop()
}
// Wait for executors to report done, or else mesosDriver.stop() will forcefully kill them.
// See SPARK-12330
val startTime = System.nanoTime()
// slaveIdsWithExecutors has no memory barrier, so this is eventually consistent
while (numExecutors() > 0 &&
System.nanoTime() - startTime < shutdownTimeoutMS * 1000L * 1000L) {
Thread.sleep(100)
}
if (numExecutors() > 0) {
logWarning(s"Timed out waiting for ${numExecutors()} remaining executors "
+ s"to terminate within $shutdownTimeoutMS ms. This may leave temporary files "
+ "on the mesos nodes.")
}
// Close the mesos external shuffle client if used
mesosExternalShuffleClient.foreach(_.close())
if (schedulerDriver != null) {
schedulerDriver.stop()
}
}
override def frameworkMessage(
d: org.apache.mesos.SchedulerDriver, e: ExecutorID, s: SlaveID, b: Array[Byte]) {}
/**
* Called when a slave is lost or a Mesos task finished. Updates local view on
* what tasks are running. It also notifies the driver that an executor was removed.
*/
private def executorTerminated(
d: org.apache.mesos.SchedulerDriver,
slaveId: String,
taskId: String,
reason: String): Unit = {
stateLock.synchronized {
// Do not call removeExecutor() after this scheduler backend was stopped because
// removeExecutor() internally will send a message to the driver endpoint but
// the driver endpoint is not available now, otherwise an exception will be thrown.
if (!stopCalled) {
removeExecutor(taskId, SlaveLost(reason))
}
slaves(slaveId).taskIDs.remove(taskId)
}
}
override def slaveLost(d: org.apache.mesos.SchedulerDriver, slaveId: SlaveID): Unit = {
logInfo(s"Mesos slave lost: ${slaveId.getValue}")
}
override def executorLost(
d: org.apache.mesos.SchedulerDriver, e: ExecutorID, s: SlaveID, status: Int): Unit = {
logInfo("Mesos executor lost: %s".format(e.getValue))
}
override def applicationId(): String =
Option(appId).getOrElse {
logWarning("Application ID is not initialized yet.")
super.applicationId
}
override def doRequestTotalExecutors(requestedTotal: Int): Future[Boolean] = Future.successful {
    // We don't truly know if we can fulfill the requested number of executors,
    // since at coarse grain it depends on the number of slaves available.
logInfo("Capping the total amount of executors to " + requestedTotal)
executorLimitOption = Some(requestedTotal)
true
}
override def doKillExecutors(executorIds: Seq[String]): Future[Boolean] = Future.successful {
if (schedulerDriver == null) {
logWarning("Asked to kill executors before the Mesos driver was started.")
false
} else {
for (executorId <- executorIds) {
val taskId = TaskID.newBuilder().setValue(executorId).build()
schedulerDriver.killTask(taskId)
}
// no need to adjust `executorLimitOption` since the AllocationManager already communicated
// the desired limit through a call to `doRequestTotalExecutors`.
// See [[o.a.s.scheduler.cluster.CoarseGrainedSchedulerBackend.killExecutors]]
true
}
}
private def numExecutors(): Int = {
slaves.values.map(_.taskIDs.size).sum
}
private def executorHostname(offer: Offer): String = {
if (sc.conf.get(NETWORK_NAME).isDefined) {
// The agent's IP is not visible in a CNI container, so we bind to 0.0.0.0
"0.0.0.0"
} else {
offer.getHostname
}
}
}
private class Slave(val hostname: String) {
val taskIDs = new mutable.HashSet[String]()
var taskFailures = 0
var shuffleRegistered = false
}
object IdHelper {
// Use atomic values since Spark contexts can be initialized in parallel
private[mesos] val nextSCNumber = new AtomicLong(0)
private[mesos] val startedBefore = new AtomicBoolean(false)
}
|
narahari92/spark
|
resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala
|
Scala
|
apache-2.0
| 25,989 |
package com.lynbrookrobotics.potassium.commons.drivetrain.unicycle.control
import com.lynbrookrobotics.potassium.streams.Stream
import squants.{Length, Time}
import squants.motion._
import squants.space.Feet
trait UnicycleMotionProfileControllers extends UnicycleCoreControllers {
/**
   * Uses a control loop that results in constant acceleration,
   * then constant cruising velocity, and then constant deceleration to the
   * target final velocity at the target position. It calculates the
   * velocity required, which is then meant to be acted on by a well-tuned
   * velocity controller. This controller is meant to control second-order
   * systems such as driving a distance (Length).
   *
   * @param cruisingVelocity desired magnitude of cruising velocity
   * @param finalVelocity desired final velocity when the target is reached
   * @param acceleration desired magnitude of acceleration of the system
   * @param deceleration desired magnitude of deceleration of the system
   * @param position current position
   * @param targetPosition target position to drive to
   * @param velocity current velocity
   * @return velocity to travel at to achieve a trapezoidal motion profile.
   *         This value must be fed to a well-tuned velocity controller to result
   *         in correct behaviour
*/
def trapezoidalDriveControl(
cruisingVelocity: Velocity,
finalVelocity: Velocity,
acceleration: Acceleration,
    deceleration: Acceleration,
position: Stream[Length],
targetPosition: Stream[Length],
velocity: Stream[Velocity]
): (Stream[Velocity], Stream[Length]) = {
val error = targetPosition.minus(position)
val signError = error.map(error => Math.signum(error.toFeet))
val LengthTraveled = position.minus(position.currentValue)
    // Kick-start the profile over the first 0.25 feet, ramping up from the initial velocity
val KickstartLength = Feet(0.25)
val timeFromStart = position.originTimeStream.get.relativize { (startTime, currentTime) =>
currentTime - startTime
}
/**
     * Calculate the magnitude of the ideal velocity assuming constant acceleration,
     * as a function of the current position, using the equation V^2 = V0^2 + 2a(x - x_0).
     * The direction of the ideal velocity is decided later.
*/
val velocityAccel = LengthTraveled.zip(velocity.currentValue).zip(timeFromStart).map {
case ((traveled, initVelocity), timeFromStart) =>
        // Within the kick-start region, ramp up from the initial velocity; outside it the
        // sqrt form below takes over (near the start it would be ~zero and command no motion)
if (traveled.abs <= KickstartLength) {
initVelocity + acceleration * timeFromStart
} else {
val V0Squared = Math.pow(initVelocity.toFeetPerSecond, 2)
val accelerationValue = acceleration.toFeetPerSecondSquared
val LengthTraveledValue = traveled.abs.toFeet
FeetPerSecond(math.sqrt(math.abs(V0Squared + 2 * accelerationValue * LengthTraveledValue)))
}
}
/**
     * Calculate the magnitude of the ideal velocity assuming constant deceleration,
     * using the equation V_final^2 = V_curr^2 + 2a(target - x_curr).
     * Solving for V_curr, we get V_curr^2 = V_f^2 - 2a(target - x_curr).
     * Since we are decelerating, a = -deceleration (value passed on
     * construction), giving
     * V_curr = sqrt(V_f^2 + 2 * deceleration * (target - x_curr)).
     * The direction of the ideal velocity is decided later.
*/
val velocityDeccel = error.map { toTarget =>
val finalVelocitySquared = math.pow(finalVelocity.toFeetPerSecond, 2)
val errorValue = toTarget.abs.toFeet
      val accelerationValue = deceleration.toFeetPerSecondSquared
FeetPerSecond(math.sqrt(math.abs(finalVelocitySquared + 2 * accelerationValue * errorValue)))
}
// Ensure that motion is in the direction of the error
val velocityOutput = velocityDeccel.zip(velocityAccel).zip(signError).map {
case ((velDec, velAcc), sign) => {
val minVelocity = velDec.abs min cruisingVelocity.abs min velAcc.abs
sign * minVelocity
}
}
(velocityOutput, error)
}
}
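// Illustrative sketch, not part of the original source: the same trapezoidal velocity
// selection as trapezoidalDriveControl above, applied to plain scalar quantities instead
// of potassium Streams. The object and method names here are hypothetical; the code relies
// on the squants imports at the top of this file.
object TrapezoidalProfileExample {
  /** Velocity command given the distance traveled so far and the distance remaining to the target. */
  def idealVelocity(
    cruisingVelocity: Velocity,
    finalVelocity: Velocity,
    acceleration: Acceleration,
    deceleration: Acceleration,
    traveled: Length,
    toTarget: Length
  ): Velocity = {
    // Accelerating branch: V = sqrt(V0^2 + 2 * a * x), with V0 = 0 for simplicity
    val accelLimited = FeetPerSecond(
      math.sqrt(2 * acceleration.toFeetPerSecondSquared * traveled.abs.toFeet))
    // Decelerating branch: V = sqrt(Vf^2 + 2 * a_dec * (target - x))
    val decelLimited = FeetPerSecond(
      math.sqrt(math.pow(finalVelocity.toFeetPerSecond, 2) +
        2 * deceleration.toFeetPerSecondSquared * toTarget.abs.toFeet))
    // Take the most restrictive of the three limits and point it toward the target
    val magnitude = accelLimited min decelLimited min cruisingVelocity.abs
    magnitude * math.signum(toTarget.toFeet)
  }

  def main(args: Array[String]): Unit = {
    val v = idealVelocity(
      cruisingVelocity = FeetPerSecond(10),
      finalVelocity = FeetPerSecond(0),
      acceleration = FeetPerSecondSquared(5),
      deceleration = FeetPerSecondSquared(5),
      traveled = Feet(2),
      toTarget = Feet(8))
    println(s"commanded velocity: $v") // min(sqrt(2*5*2) ~= 4.47, sqrt(2*5*8) ~= 8.94, 10) ft/s
  }
}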
|
Team846/potassium
|
commons/src/main/scala/com/lynbrookrobotics/potassium/commons/drivetrain/unicycle/control/UnicycleMotionProfileControllers.scala
|
Scala
|
mit
| 3,847 |
package views
import play.api.{Logger, Play}
import java.io.InputStream
import com.ee.assets.deployment.{ContentInfo, Deployer}
import com.ee.assets.models.SimpleAssetsInfo
import com.ee.assets.transformers.{SimpleDeployedElement, DeployedElement}
object Helper{
val deployer : Deployer = new Deployer {
override def deploy(filename: String, lastModified: Long, contents: => InputStream, info: ContentInfo): Either[String, DeployedElement] = {
//do your deployment here...
println(filename)
println(lastModified)
println(info)
Right(SimpleDeployedElement(s"http://some-non-existent-server.com/$filename"))
}
}
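  /**
   * Round-robins over the hosts configured under `assets.hosts`, e.g. (illustrative values):
   *   assets.hosts = ["http://cdn-one.example.com", "http://cdn-two.example.com"]
   */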
object ExternalHosts{
import Play.current
lazy val logger = Logger("external-hosts")
val configPath = "assets.hosts"
var hostsIndex = 0
import scala.collection.JavaConverters._
lazy val hosts = Play.configuration.getStringList(configPath).fold(List[String]())(_.asScala.toList)
def getNextHost = {
val host = ExternalHosts.hosts(ExternalHosts.hostsIndex)
if(ExternalHosts.hostsIndex == ExternalHosts.hosts.size-1) ExternalHosts.hostsIndex = 0
else ExternalHosts.hostsIndex += 1
host
}
}
case class RotatingHostsDeployedElement(deployedPath: String, getHost: () => String) extends DeployedElement{
override def path: String = s"${getHost()}$deployedPath"
}
/**
* An example of rotating hosts for a deployed element.
*/
val rotatingDeployer : Deployer = new Deployer {
override def deploy(filename: String, lastModified: Long, contents: => InputStream, info: ContentInfo): Either[String, DeployedElement] = {
//do your deployment here...
println(filename)
println(lastModified)
println(info)
Right(RotatingHostsDeployedElement(filename, ExternalHosts.getNextHost _))
}
}
val loader = new com.ee.assets.Loader(None, Play.current.mode, Play.current.configuration, info = SimpleAssetsInfo("assets", "public"))
val deployLoader = new com.ee.assets.Loader(Some(deployer), Play.current.mode, Play.current.configuration)
val rotatingHostsLoader = new com.ee.assets.Loader(Some(rotatingDeployer), Play.current.mode, Play.current.configuration)
}
|
edeustace/assets-loader
|
example/example-play-app/app/views/Helper.scala
|
Scala
|
mit
| 2,229 |
package com.softwaremill.codebrag.repository
import org.eclipse.jgit.api.Git
import scala.collection.JavaConversions._
import org.eclipse.jgit.api.ListBranchCommand.ListMode
trait BranchesModel {
def remoteBranchesFullNames: Set[String]
def findStaleBranchesFullNames(locallyCachedBranches: Set[String]): Set[String]
def getCheckedOutBranchFullName: String
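  /**
   * For example (illustrative, for the Git implementation where the prefix is
   * "refs/remotes/origin/"):
   *   resolveFullBranchName("master")                     == "refs/remotes/origin/master"
   *   resolveFullBranchName("refs/remotes/origin/master") == "refs/remotes/origin/master"
   */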
def resolveFullBranchName(branchName: String) = {
if(branchName.startsWith(RepositoryBranchPrefix)) {
branchName
} else {
s"${RepositoryBranchPrefix}${branchName}"
}
}
val RepositoryBranchPrefix: String
}
trait GitRepoBranchesModel extends BranchesModel {
self: GitRepository =>
def remoteBranchesFullNames = {
remoteBranches.map(_.getName).filterNot(_ == s"${RepositoryBranchPrefix}HEAD").toSet
}
def findStaleBranchesFullNames(locallyCachedBranches: Set[String]) = {
val branches = remoteBranchesFullNames
locallyCachedBranches.filterNot(branches.contains)
}
def getCheckedOutBranchFullName = s"${RepositoryBranchPrefix}${repo.getBranch}"
val RepositoryBranchPrefix = "refs/remotes/origin/"
val BranchListMode = ListMode.REMOTE
private def remoteBranches = new Git(repo).branchList().setListMode(BranchListMode).call().toList
}
trait GitSvnBranchesModel extends BranchesModel {
self: GitSvnRepository =>
def remoteBranchesFullNames = Set("master")
def findStaleBranchesFullNames(locallyCachedBranches: Set[String]) = Set.empty
def getCheckedOutBranchFullName = "master"
val RepositoryBranchPrefix = ""
}
|
softwaremill/codebrag
|
codebrag-service/src/main/scala/com/softwaremill/codebrag/repository/BranchesModel.scala
|
Scala
|
agpl-3.0
| 1,562 |
package scala.meta.metac
import scala.meta.cli._
final class Settings private (val scalacArgs: List[String]) {
private def this() = {
this(scalacArgs = Nil)
}
def withScalacArgs(scalacArgs: List[String]): Settings = {
copy(scalacArgs = scalacArgs)
}
private def copy(scalacArgs: List[String] = scalacArgs): Settings = {
new Settings(scalacArgs = scalacArgs)
}
}
object Settings {
def parse(args: List[String], reporter: Reporter): Option[Settings] = {
Some(new Settings(args))
}
def apply(): Settings = {
new Settings()
}
}
|
MasseGuillaume/scalameta
|
semanticdb/metac/src/main/scala/scala/meta/metac/Settings.scala
|
Scala
|
bsd-3-clause
| 570 |
package com.evecentral.routes
import org.scalatest.{BeforeAndAfterAll, FunSuiteLike}
import akka.actor.{Props, ActorSystem, Actor}
import com.evecentral.dataaccess.StaticProvider
import akka.testkit.{TestActorRef, TestKit}
class RouteFinderTest(as: ActorSystem) extends TestKit(as) with FunSuiteLike with BeforeAndAfterAll {
def this() = this(ActorSystem("MySpec"))
override def afterAll() {
system.shutdown()
}
val rfa = system.actorOf(Props[RouteFinderActor])
val rf = TestActorRef[RouteFinderActor].underlyingActor
val jita = StaticProvider.systemsMap(30000142)
val sagain = StaticProvider.systemsMap(30001719)
val perimiter = StaticProvider.systemsMap(30000144)
test("Jita to Sagain distance") {
assert(rf.routeDistance(jita, sagain) == 15)
}
test("Jita to Sagain route") {
val route = rf.route(jita, sagain)
val expected_route_contains = Jump(jita, perimiter)
assert(route contains expected_route_contains, "Got %s" format (route))
}
test("Actor Jita to Sagain distance") {
(rfa ! DistanceBetween(jita, sagain))
//expectMsg(15)
}
}
|
theatrus/eve-central.com
|
core/src/test/scala/com/evecentral/routes/TestRouteFinderActor.scala
|
Scala
|
agpl-3.0
| 1,075 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.python
import java.io._
import java.net._
import java.nio.charset.StandardCharsets
import java.util.{ArrayList => JArrayList, List => JList, Map => JMap}
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.language.existentials
import scala.util.control.NonFatal
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.compress.CompressionCodec
import org.apache.hadoop.mapred.{InputFormat, JobConf, OutputFormat}
import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, OutputFormat => NewOutputFormat}
import org.apache.spark._
import org.apache.spark.api.java.{JavaPairRDD, JavaRDD, JavaSparkContext}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.input.PortableDataStream
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.security.SocketAuthHelper
import org.apache.spark.util._
private[spark] class PythonRDD(
parent: RDD[_],
func: PythonFunction,
    preservePartitioning: Boolean)
extends RDD[Array[Byte]](parent) {
val bufferSize = conf.getInt("spark.buffer.size", 65536)
val reuseWorker = conf.getBoolean("spark.python.worker.reuse", true)
override def getPartitions: Array[Partition] = firstParent.partitions
override val partitioner: Option[Partitioner] = {
    if (preservePartitioning) firstParent.partitioner else None
}
val asJavaRDD: JavaRDD[Array[Byte]] = JavaRDD.fromRDD(this)
override def compute(split: Partition, context: TaskContext): Iterator[Array[Byte]] = {
val runner = PythonRunner(func, bufferSize, reuseWorker)
runner.compute(firstParent.iterator(split, context), split.index, context)
}
}
/**
* A wrapper for a Python function, contains all necessary context to run the function in Python
* runner.
*/
private[spark] case class PythonFunction(
command: Array[Byte],
envVars: JMap[String, String],
pythonIncludes: JList[String],
pythonExec: String,
pythonVer: String,
broadcastVars: JList[Broadcast[PythonBroadcast]],
accumulator: PythonAccumulatorV2)
/**
* A wrapper for chained Python functions (from bottom to top).
 * @param funcs the chained functions, applied from bottom (first) to top (last)
*/
private[spark] case class ChainedPythonFunctions(funcs: Seq[PythonFunction])
/** Thrown for exceptions in user Python code. */
private[spark] class PythonException(msg: String, cause: Exception)
extends RuntimeException(msg, cause)
/**
* Form an RDD[(Array[Byte], Array[Byte])] from key-value pairs returned from Python.
* This is used by PySpark's shuffle operations.
*/
private class PairwiseRDD(prev: RDD[Array[Byte]]) extends RDD[(Long, Array[Byte])](prev) {
override def getPartitions: Array[Partition] = prev.partitions
override val partitioner: Option[Partitioner] = prev.partitioner
override def compute(split: Partition, context: TaskContext): Iterator[(Long, Array[Byte])] =
prev.iterator(split, context).grouped(2).map {
case Seq(a, b) => (Utils.deserializeLongValue(a), b)
case x => throw new SparkException("PairwiseRDD: unexpected value: " + x)
}
val asJavaPairRDD : JavaPairRDD[Long, Array[Byte]] = JavaPairRDD.fromRDD(this)
}
private[spark] object PythonRDD extends Logging {
// remember the broadcasts sent to each worker
private val workerBroadcasts = new mutable.WeakHashMap[Socket, mutable.Set[Long]]()
// Authentication helper used when serving iterator data.
private lazy val authHelper = {
val conf = Option(SparkEnv.get).map(_.conf).getOrElse(new SparkConf())
new SocketAuthHelper(conf)
}
def getWorkerBroadcasts(worker: Socket): mutable.Set[Long] = {
synchronized {
workerBroadcasts.getOrElseUpdate(worker, new mutable.HashSet[Long]())
}
}
/**
* Return an RDD of values from an RDD of (Long, Array[Byte]), with preservePartitions=true
*
* This is useful for PySpark to have the partitioner after partitionBy()
*/
def valueOfPair(pair: JavaPairRDD[Long, Array[Byte]]): JavaRDD[Array[Byte]] = {
pair.rdd.mapPartitions(it => it.map(_._2), true)
}
/**
* Adapter for calling SparkContext#runJob from Python.
*
* This method will serve an iterator of an array that contains all elements in the RDD
* (effectively a collect()), but allows you to run on a certain subset of partitions,
* or to enable local execution.
*
* @return 2-tuple (as a Java array) with the port number of a local socket which serves the
* data collected from this job, and the secret for authentication.
*/
def runJob(
sc: SparkContext,
rdd: JavaRDD[Array[Byte]],
partitions: JArrayList[Int]): Array[Any] = {
type ByteArray = Array[Byte]
type UnrolledPartition = Array[ByteArray]
val allPartitions: Array[UnrolledPartition] =
sc.runJob(rdd, (x: Iterator[ByteArray]) => x.toArray, partitions.asScala)
val flattenedPartition: UnrolledPartition = Array.concat(allPartitions: _*)
serveIterator(flattenedPartition.iterator,
s"serve RDD ${rdd.id} with partitions ${partitions.asScala.mkString(",")}")
}
/**
* A helper function to collect an RDD as an iterator, then serve it via socket.
*
* @return 2-tuple (as a Java array) with the port number of a local socket which serves the
* data collected from this job, and the secret for authentication.
*/
def collectAndServe[T](rdd: RDD[T]): Array[Any] = {
serveIterator(rdd.collect().iterator, s"serve RDD ${rdd.id}")
}
def toLocalIteratorAndServe[T](rdd: RDD[T]): Array[Any] = {
serveIterator(rdd.toLocalIterator, s"serve toLocalIterator")
}
def readRDDFromFile(sc: JavaSparkContext, filename: String, parallelism: Int):
JavaRDD[Array[Byte]] = {
val file = new DataInputStream(new FileInputStream(filename))
try {
val objs = new mutable.ArrayBuffer[Array[Byte]]
try {
while (true) {
val length = file.readInt()
val obj = new Array[Byte](length)
file.readFully(obj)
objs += obj
}
} catch {
case eof: EOFException => // No-op
}
JavaRDD.fromRDD(sc.sc.parallelize(objs, parallelism))
} finally {
file.close()
}
}
def readBroadcastFromFile(sc: JavaSparkContext, path: String): Broadcast[PythonBroadcast] = {
sc.broadcast(new PythonBroadcast(path))
}
def writeIteratorToStream[T](iter: Iterator[T], dataOut: DataOutputStream) {
def write(obj: Any): Unit = obj match {
case null =>
dataOut.writeInt(SpecialLengths.NULL)
case arr: Array[Byte] =>
dataOut.writeInt(arr.length)
dataOut.write(arr)
case str: String =>
writeUTF(str, dataOut)
case stream: PortableDataStream =>
write(stream.toArray())
case (key, value) =>
write(key)
write(value)
case other =>
throw new SparkException("Unexpected element type " + other.getClass)
}
iter.foreach(write)
}
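  /**
   * Illustrative sketch, not part of the original Spark source: reads back one element of
   * the length-prefixed framing produced by `writeIteratorToStream` / `writeUTF` above.
   * Each record is an Int length followed by that many bytes; a length equal to
   * SpecialLengths.NULL marks a null element. The method name is hypothetical.
   */
  private def readElementForIllustration(dataIn: DataInputStream): Array[Byte] = {
    val length = dataIn.readInt()
    if (length == SpecialLengths.NULL) {
      null
    } else {
      val bytes = new Array[Byte](length)
      dataIn.readFully(bytes)
      bytes
    }
  }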
/**
* Create an RDD from a path using [[org.apache.hadoop.mapred.SequenceFileInputFormat]],
* key and value class.
* A key and/or value converter class can optionally be passed in
* (see [[org.apache.spark.api.python.Converter]])
*/
def sequenceFile[K, V](
sc: JavaSparkContext,
path: String,
keyClassMaybeNull: String,
valueClassMaybeNull: String,
keyConverterClass: String,
valueConverterClass: String,
minSplits: Int,
batchSize: Int): JavaRDD[Array[Byte]] = {
val keyClass = Option(keyClassMaybeNull).getOrElse("org.apache.hadoop.io.Text")
val valueClass = Option(valueClassMaybeNull).getOrElse("org.apache.hadoop.io.Text")
val kc = Utils.classForName(keyClass).asInstanceOf[Class[K]]
val vc = Utils.classForName(valueClass).asInstanceOf[Class[V]]
val rdd = sc.sc.sequenceFile[K, V](path, kc, vc, minSplits)
val confBroadcasted = sc.sc.broadcast(new SerializableConfiguration(sc.hadoopConfiguration()))
val converted = convertRDD(rdd, keyConverterClass, valueConverterClass,
new WritableToJavaConverter(confBroadcasted))
JavaRDD.fromRDD(SerDeUtil.pairRDDToPython(converted, batchSize))
}
/**
* Create an RDD from a file path, using an arbitrary [[org.apache.hadoop.mapreduce.InputFormat]],
* key and value class.
* A key and/or value converter class can optionally be passed in
* (see [[org.apache.spark.api.python.Converter]])
*/
def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]](
sc: JavaSparkContext,
path: String,
inputFormatClass: String,
keyClass: String,
valueClass: String,
keyConverterClass: String,
valueConverterClass: String,
confAsMap: java.util.HashMap[String, String],
batchSize: Int): JavaRDD[Array[Byte]] = {
val mergedConf = getMergedConf(confAsMap, sc.hadoopConfiguration())
val rdd =
newAPIHadoopRDDFromClassNames[K, V, F](sc,
Some(path), inputFormatClass, keyClass, valueClass, mergedConf)
val confBroadcasted = sc.sc.broadcast(new SerializableConfiguration(mergedConf))
val converted = convertRDD(rdd, keyConverterClass, valueConverterClass,
new WritableToJavaConverter(confBroadcasted))
JavaRDD.fromRDD(SerDeUtil.pairRDDToPython(converted, batchSize))
}
/**
* Create an RDD from a [[org.apache.hadoop.conf.Configuration]] converted from a map that is
* passed in from Python, using an arbitrary [[org.apache.hadoop.mapreduce.InputFormat]],
* key and value class.
* A key and/or value converter class can optionally be passed in
* (see [[org.apache.spark.api.python.Converter]])
*/
def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]](
sc: JavaSparkContext,
inputFormatClass: String,
keyClass: String,
valueClass: String,
keyConverterClass: String,
valueConverterClass: String,
confAsMap: java.util.HashMap[String, String],
batchSize: Int): JavaRDD[Array[Byte]] = {
val conf = PythonHadoopUtil.mapToConf(confAsMap)
val rdd =
newAPIHadoopRDDFromClassNames[K, V, F](sc,
None, inputFormatClass, keyClass, valueClass, conf)
val confBroadcasted = sc.sc.broadcast(new SerializableConfiguration(conf))
val converted = convertRDD(rdd, keyConverterClass, valueConverterClass,
new WritableToJavaConverter(confBroadcasted))
JavaRDD.fromRDD(SerDeUtil.pairRDDToPython(converted, batchSize))
}
private def newAPIHadoopRDDFromClassNames[K, V, F <: NewInputFormat[K, V]](
sc: JavaSparkContext,
path: Option[String] = None,
inputFormatClass: String,
keyClass: String,
valueClass: String,
conf: Configuration): RDD[(K, V)] = {
val kc = Utils.classForName(keyClass).asInstanceOf[Class[K]]
val vc = Utils.classForName(valueClass).asInstanceOf[Class[V]]
val fc = Utils.classForName(inputFormatClass).asInstanceOf[Class[F]]
if (path.isDefined) {
sc.sc.newAPIHadoopFile[K, V, F](path.get, fc, kc, vc, conf)
} else {
sc.sc.newAPIHadoopRDD[K, V, F](conf, fc, kc, vc)
}
}
/**
* Create an RDD from a file path, using an arbitrary [[org.apache.hadoop.mapred.InputFormat]],
* key and value class.
* A key and/or value converter class can optionally be passed in
* (see [[org.apache.spark.api.python.Converter]])
*/
def hadoopFile[K, V, F <: InputFormat[K, V]](
sc: JavaSparkContext,
path: String,
inputFormatClass: String,
keyClass: String,
valueClass: String,
keyConverterClass: String,
valueConverterClass: String,
confAsMap: java.util.HashMap[String, String],
batchSize: Int): JavaRDD[Array[Byte]] = {
val mergedConf = getMergedConf(confAsMap, sc.hadoopConfiguration())
val rdd =
hadoopRDDFromClassNames[K, V, F](sc,
Some(path), inputFormatClass, keyClass, valueClass, mergedConf)
val confBroadcasted = sc.sc.broadcast(new SerializableConfiguration(mergedConf))
val converted = convertRDD(rdd, keyConverterClass, valueConverterClass,
new WritableToJavaConverter(confBroadcasted))
JavaRDD.fromRDD(SerDeUtil.pairRDDToPython(converted, batchSize))
}
/**
* Create an RDD from a [[org.apache.hadoop.conf.Configuration]] converted from a map
* that is passed in from Python, using an arbitrary [[org.apache.hadoop.mapred.InputFormat]],
* key and value class
* A key and/or value converter class can optionally be passed in
* (see [[org.apache.spark.api.python.Converter]])
*/
def hadoopRDD[K, V, F <: InputFormat[K, V]](
sc: JavaSparkContext,
inputFormatClass: String,
keyClass: String,
valueClass: String,
keyConverterClass: String,
valueConverterClass: String,
confAsMap: java.util.HashMap[String, String],
batchSize: Int): JavaRDD[Array[Byte]] = {
val conf = PythonHadoopUtil.mapToConf(confAsMap)
val rdd =
hadoopRDDFromClassNames[K, V, F](sc,
None, inputFormatClass, keyClass, valueClass, conf)
val confBroadcasted = sc.sc.broadcast(new SerializableConfiguration(conf))
val converted = convertRDD(rdd, keyConverterClass, valueConverterClass,
new WritableToJavaConverter(confBroadcasted))
JavaRDD.fromRDD(SerDeUtil.pairRDDToPython(converted, batchSize))
}
private def hadoopRDDFromClassNames[K, V, F <: InputFormat[K, V]](
sc: JavaSparkContext,
path: Option[String] = None,
inputFormatClass: String,
keyClass: String,
valueClass: String,
conf: Configuration) = {
val kc = Utils.classForName(keyClass).asInstanceOf[Class[K]]
val vc = Utils.classForName(valueClass).asInstanceOf[Class[V]]
val fc = Utils.classForName(inputFormatClass).asInstanceOf[Class[F]]
if (path.isDefined) {
sc.sc.hadoopFile(path.get, fc, kc, vc)
} else {
sc.sc.hadoopRDD(new JobConf(conf), fc, kc, vc)
}
}
def writeUTF(str: String, dataOut: DataOutputStream) {
val bytes = str.getBytes(StandardCharsets.UTF_8)
dataOut.writeInt(bytes.length)
dataOut.write(bytes)
}
/**
   * Create a socket server and a background thread to serve the data in `items`.
   *
   * The socket server accepts only one connection, and closes if no connection
   * arrives within 15 seconds.
   *
   * Once a connection comes in, it tries to serialize all the data in `items`
   * and send it over that connection.
   *
   * The thread terminates after all the data has been sent or if any exception occurs.
*
* @return 2-tuple (as a Java array) with the port number of a local socket which serves the
* data collected from this job, and the secret for authentication.
*/
def serveIterator(items: Iterator[_], threadName: String): Array[Any] = {
val serverSocket = new ServerSocket(0, 1, InetAddress.getByName("localhost"))
// Close the socket if no connection in 15 seconds
serverSocket.setSoTimeout(15000)
new Thread(threadName) {
setDaemon(true)
override def run() {
try {
val sock = serverSocket.accept()
authHelper.authClient(sock)
val out = new DataOutputStream(new BufferedOutputStream(sock.getOutputStream))
Utils.tryWithSafeFinally {
writeIteratorToStream(items, out)
} {
out.close()
sock.close()
}
} catch {
case NonFatal(e) =>
logError(s"Error while sending iterator", e)
} finally {
serverSocket.close()
}
}
}.start()
Array(serverSocket.getLocalPort, authHelper.secret)
}
private def getMergedConf(confAsMap: java.util.HashMap[String, String],
baseConf: Configuration): Configuration = {
val conf = PythonHadoopUtil.mapToConf(confAsMap)
PythonHadoopUtil.mergeConfs(baseConf, conf)
}
private def inferKeyValueTypes[K, V](rdd: RDD[(K, V)], keyConverterClass: String = null,
valueConverterClass: String = null): (Class[_], Class[_]) = {
// Peek at an element to figure out key/value types. Since Writables are not serializable,
// we cannot call first() on the converted RDD. Instead, we call first() on the original RDD
// and then convert locally.
val (key, value) = rdd.first()
val (kc, vc) = getKeyValueConverters(keyConverterClass, valueConverterClass,
new JavaToWritableConverter)
(kc.convert(key).getClass, vc.convert(value).getClass)
}
private def getKeyValueTypes(keyClass: String, valueClass: String):
Option[(Class[_], Class[_])] = {
for {
k <- Option(keyClass)
v <- Option(valueClass)
} yield (Utils.classForName(k), Utils.classForName(v))
}
private def getKeyValueConverters(keyConverterClass: String, valueConverterClass: String,
defaultConverter: Converter[Any, Any]): (Converter[Any, Any], Converter[Any, Any]) = {
val keyConverter = Converter.getInstance(Option(keyConverterClass), defaultConverter)
val valueConverter = Converter.getInstance(Option(valueConverterClass), defaultConverter)
(keyConverter, valueConverter)
}
/**
* Convert an RDD of key-value pairs from internal types to serializable types suitable for
* output, or vice versa.
*/
private def convertRDD[K, V](rdd: RDD[(K, V)],
keyConverterClass: String,
valueConverterClass: String,
defaultConverter: Converter[Any, Any]): RDD[(Any, Any)] = {
val (kc, vc) = getKeyValueConverters(keyConverterClass, valueConverterClass,
defaultConverter)
PythonHadoopUtil.convertRDD(rdd, kc, vc)
}
/**
* Output a Python RDD of key-value pairs as a Hadoop SequenceFile using the Writable types
* we convert from the RDD's key and value types. Note that keys and values can't be
* [[org.apache.hadoop.io.Writable]] types already, since Writables are not Java
* `Serializable` and we can't peek at them. The `path` can be on any Hadoop file system.
*/
def saveAsSequenceFile[K, V, C <: CompressionCodec](
pyRDD: JavaRDD[Array[Byte]],
batchSerialized: Boolean,
path: String,
compressionCodecClass: String): Unit = {
saveAsHadoopFile(
pyRDD, batchSerialized, path, "org.apache.hadoop.mapred.SequenceFileOutputFormat",
null, null, null, null, new java.util.HashMap(), compressionCodecClass)
}
/**
* Output a Python RDD of key-value pairs to any Hadoop file system, using old Hadoop
* `OutputFormat` in mapred package. Keys and values are converted to suitable output
* types using either user specified converters or, if not specified,
* [[org.apache.spark.api.python.JavaToWritableConverter]]. Post-conversion types
* `keyClass` and `valueClass` are automatically inferred if not specified. The passed-in
* `confAsMap` is merged with the default Hadoop conf associated with the SparkContext of
* this RDD.
*/
def saveAsHadoopFile[K, V, F <: OutputFormat[_, _], C <: CompressionCodec](
pyRDD: JavaRDD[Array[Byte]],
batchSerialized: Boolean,
path: String,
outputFormatClass: String,
keyClass: String,
valueClass: String,
keyConverterClass: String,
valueConverterClass: String,
confAsMap: java.util.HashMap[String, String],
compressionCodecClass: String): Unit = {
val rdd = SerDeUtil.pythonToPairRDD(pyRDD, batchSerialized)
val (kc, vc) = getKeyValueTypes(keyClass, valueClass).getOrElse(
inferKeyValueTypes(rdd, keyConverterClass, valueConverterClass))
val mergedConf = getMergedConf(confAsMap, pyRDD.context.hadoopConfiguration)
val codec = Option(compressionCodecClass).map(Utils.classForName(_).asInstanceOf[Class[C]])
val converted = convertRDD(rdd, keyConverterClass, valueConverterClass,
new JavaToWritableConverter)
val fc = Utils.classForName(outputFormatClass).asInstanceOf[Class[F]]
converted.saveAsHadoopFile(path, kc, vc, fc, new JobConf(mergedConf), codec = codec)
}
/**
* Output a Python RDD of key-value pairs to any Hadoop file system, using new Hadoop
* `OutputFormat` in mapreduce package. Keys and values are converted to suitable output
* types using either user specified converters or, if not specified,
* [[org.apache.spark.api.python.JavaToWritableConverter]]. Post-conversion types
* `keyClass` and `valueClass` are automatically inferred if not specified. The passed-in
* `confAsMap` is merged with the default Hadoop conf associated with the SparkContext of
* this RDD.
*/
def saveAsNewAPIHadoopFile[K, V, F <: NewOutputFormat[_, _]](
pyRDD: JavaRDD[Array[Byte]],
batchSerialized: Boolean,
path: String,
outputFormatClass: String,
keyClass: String,
valueClass: String,
keyConverterClass: String,
valueConverterClass: String,
confAsMap: java.util.HashMap[String, String]): Unit = {
val rdd = SerDeUtil.pythonToPairRDD(pyRDD, batchSerialized)
val (kc, vc) = getKeyValueTypes(keyClass, valueClass).getOrElse(
inferKeyValueTypes(rdd, keyConverterClass, valueConverterClass))
val mergedConf = getMergedConf(confAsMap, pyRDD.context.hadoopConfiguration)
val converted = convertRDD(rdd, keyConverterClass, valueConverterClass,
new JavaToWritableConverter)
val fc = Utils.classForName(outputFormatClass).asInstanceOf[Class[F]]
converted.saveAsNewAPIHadoopFile(path, kc, vc, fc, mergedConf)
}
/**
* Output a Python RDD of key-value pairs to any Hadoop file system, using a Hadoop conf
* converted from the passed-in `confAsMap`. The conf should set relevant output params (
* e.g., output path, output format, etc), in the same way as it would be configured for
* a Hadoop MapReduce job. Both old and new Hadoop OutputFormat APIs are supported
* (mapred vs. mapreduce). Keys/values are converted for output using either user specified
* converters or, by default, [[org.apache.spark.api.python.JavaToWritableConverter]].
*/
def saveAsHadoopDataset[K, V](
pyRDD: JavaRDD[Array[Byte]],
batchSerialized: Boolean,
confAsMap: java.util.HashMap[String, String],
keyConverterClass: String,
valueConverterClass: String,
useNewAPI: Boolean): Unit = {
val conf = PythonHadoopUtil.mapToConf(confAsMap)
val converted = convertRDD(SerDeUtil.pythonToPairRDD(pyRDD, batchSerialized),
keyConverterClass, valueConverterClass, new JavaToWritableConverter)
if (useNewAPI) {
converted.saveAsNewAPIHadoopDataset(conf)
} else {
converted.saveAsHadoopDataset(new JobConf(conf))
}
}
}
private
class BytesToString extends org.apache.spark.api.java.function.Function[Array[Byte], String] {
override def call(arr: Array[Byte]) : String = new String(arr, StandardCharsets.UTF_8)
}
/**
* Internal class that acts as an `AccumulatorV2` for Python accumulators. Inside, it
* collects a list of pickled strings that we pass to Python through a socket.
*/
private[spark] class PythonAccumulatorV2(
@transient private val serverHost: String,
private val serverPort: Int)
extends CollectionAccumulator[Array[Byte]] {
Utils.checkHost(serverHost)
val bufferSize = SparkEnv.get.conf.getInt("spark.buffer.size", 65536)
/**
* We try to reuse a single Socket to transfer accumulator updates, as they are all added
* by the DAGScheduler's single-threaded RpcEndpoint anyway.
*/
@transient private var socket: Socket = _
private def openSocket(): Socket = synchronized {
if (socket == null || socket.isClosed) {
socket = new Socket(serverHost, serverPort)
}
socket
}
// Need to override so the types match with PythonFunction
override def copyAndReset(): PythonAccumulatorV2 = new PythonAccumulatorV2(serverHost, serverPort)
override def merge(other: AccumulatorV2[Array[Byte], JList[Array[Byte]]]): Unit = synchronized {
val otherPythonAccumulator = other.asInstanceOf[PythonAccumulatorV2]
    // This conditional isn't strictly speaking needed - merging only currently happens on the
    // driver program - but that isn't guaranteed, so keep it in case this changes.
if (serverHost == null) {
// We are on the worker
super.merge(otherPythonAccumulator)
} else {
// This happens on the master, where we pass the updates to Python through a socket
val socket = openSocket()
val in = socket.getInputStream
val out = new DataOutputStream(new BufferedOutputStream(socket.getOutputStream, bufferSize))
val values = other.value
out.writeInt(values.size)
for (array <- values.asScala) {
out.writeInt(array.length)
out.write(array)
}
out.flush()
// Wait for a byte from the Python side as an acknowledgement
val byteRead = in.read()
if (byteRead == -1) {
throw new SparkException("EOF reached before Python server acknowledged")
}
}
}
}
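/**
 * Illustrative sketch, not part of the original Spark source: the receiving end of the
 * wire format written by `PythonAccumulatorV2.merge` above. The driver sends the number
 * of updates, then each update as an Int length followed by that many bytes, and waits
 * for a single acknowledgement byte. The object name and the ack value (1) are assumptions.
 */
private object PythonAccumulatorProtocolExample {
  def handleOneConnection(socket: Socket): Seq[Array[Byte]] = {
    val in = new DataInputStream(new BufferedInputStream(socket.getInputStream))
    val out = socket.getOutputStream
    val numUpdates = in.readInt()
    val updates = (0 until numUpdates).map { _ =>
      val length = in.readInt()
      val bytes = new Array[Byte](length)
      in.readFully(bytes)
      bytes
    }
    // Acknowledge the batch with one byte, since merge() blocks until it reads a byte back.
    out.write(1)
    out.flush()
    updates
  }
}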
/**
 * A wrapper for a Python broadcast variable, which is written to disk by Python. It also
 * writes the data to disk after deserialization, so Python can read it back from disk.
*/
// scalastyle:off no.finalize
private[spark] class PythonBroadcast(@transient var path: String) extends Serializable
with Logging {
/**
   * Read data from disk, then copy it to `out`.
*/
private def writeObject(out: ObjectOutputStream): Unit = Utils.tryOrIOException {
val in = new FileInputStream(new File(path))
try {
Utils.copyStream(in, out)
} finally {
in.close()
}
}
/**
   * Write data to disk, using a randomly generated file name.
*/
private def readObject(in: ObjectInputStream): Unit = Utils.tryOrIOException {
val dir = new File(Utils.getLocalDir(SparkEnv.get.conf))
val file = File.createTempFile("broadcast", "", dir)
path = file.getAbsolutePath
val out = new FileOutputStream(file)
Utils.tryWithSafeFinally {
Utils.copyStream(in, out)
} {
out.close()
}
}
/**
* Delete the file once the object is GCed.
*/
override def finalize() {
if (!path.isEmpty) {
val file = new File(path)
if (file.exists()) {
if (!file.delete()) {
logWarning(s"Error deleting ${file.getPath}")
}
}
}
super.finalize()
}
}
// scalastyle:on no.finalize
|
lxsmnv/spark
|
core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala
|
Scala
|
apache-2.0
| 27,059 |
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.reportlib.model
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{Matchers, WordSpec}
import spray.json._
import io.deepsense.reportlib.model.factory.DistributionTestFactory
class DistributionJsonSpec
extends WordSpec
with MockitoSugar
with DistributionTestFactory
with Matchers
with ReportJsonProtocol {
"NoDistribution" should {
val noDistribution: Distribution = NoDistribution(
DistributionTestFactory.distributionName,
DistributionTestFactory.distributionDescription
)
val jsonNoDistribution: JsObject = JsObject(
"name" -> JsString(DistributionTestFactory.distributionName),
"subtype" -> JsString("no_distribution"),
"description" -> JsString(DistributionTestFactory.distributionDescription),
"missingValues" -> JsNumber(0)
)
"serialize to Json" in {
val json = noDistribution.toJson
json shouldBe jsonNoDistribution
}
"deserialize from Json" in {
val distributionObject = jsonNoDistribution.convertTo[Distribution]
distributionObject shouldBe noDistribution
}
}
"DiscreteDistribution" should {
val jsonCategoricalDistribution: JsObject = JsObject(
"name" -> JsString(DistributionTestFactory.distributionName),
"subtype" -> JsString("discrete"),
"description" -> JsString(DistributionTestFactory.distributionDescription),
"missingValues" -> JsNumber(0),
"buckets" ->
JsArray(DistributionTestFactory.categoricalDistributionBuckets.map(JsString(_)).toVector),
"counts" -> JsArray(DistributionTestFactory.distributionCounts.map(JsNumber(_)).toVector)
)
"serialize to Json" in {
val json = testCategoricalDistribution.toJson
json shouldBe jsonCategoricalDistribution
}
"deserialize from Json" in {
jsonCategoricalDistribution.convertTo[Distribution] shouldBe testCategoricalDistribution
}
}
"ContinuousDistribution" should {
val statistics = testStatistics
val jsonContinuousDistribution: JsObject = JsObject(
"name" -> JsString(DistributionTestFactory.distributionName),
"subtype" -> JsString("continuous"),
"description" -> JsString(DistributionTestFactory.distributionDescription),
"missingValues" -> JsNumber(0),
"buckets" ->
JsArray(DistributionTestFactory.continuousDistributionBuckets.map(JsString(_)).toVector),
"counts" -> JsArray(DistributionTestFactory.distributionCounts.map(JsNumber(_)).toVector),
"statistics" -> expectedStatisticsJson(statistics)
)
"serialize to Json" in {
val json = testContinuousDistribution.toJson
json shouldBe jsonContinuousDistribution
}
"deserialize from Json" in {
jsonContinuousDistribution.convertTo[Distribution] shouldBe testContinuousDistribution
}
"throw IllegalArgumentException" when {
def createContinousDistributionWith(
buckets: Seq[String], counts: Seq[Long]): ContinuousDistribution = {
ContinuousDistribution("", "", 1, buckets, counts, testStatistics)
}
"created with empty buckets and single count" in {
an[IllegalArgumentException] shouldBe thrownBy(
createContinousDistributionWith(Seq(), Seq(1)))
}
"created with buckets of size one" in {
an[IllegalArgumentException] shouldBe thrownBy(
createContinousDistributionWith(Seq("1"), Seq()))
}
"created with non empty buckets and counts of size != (buckets' size -1)" in {
an[IllegalArgumentException] shouldBe thrownBy(
createContinousDistributionWith(
Seq("0.1", "0.2", "0.3"),
Seq(1)))
}
}
}
"Statistics" should {
val statisticsWithEmptyValues = testStatisticsWithEmptyValues
"serialize to Json" in {
val json = statisticsWithEmptyValues.toJson
json shouldBe expectedStatisticsJson(statisticsWithEmptyValues)
}
"deserialize from Json" in {
expectedStatisticsJson(statisticsWithEmptyValues).convertTo[Statistics] shouldBe
statisticsWithEmptyValues
}
}
private def expectedStatisticsJson(statistics: Statistics): JsObject =
JsObject(
"max" -> jsStringOrNull(statistics.max),
"min" -> jsStringOrNull(statistics.min),
"mean" -> jsStringOrNull(statistics.mean)
)
private def jsStringOrNull(s: Option[String]): JsValue = s.map(JsString(_)).getOrElse(JsNull)
}
|
deepsense-io/seahorse-workflow-executor
|
reportlib/src/test/scala/io/deepsense/reportlib/model/DistributionJsonSpec.scala
|
Scala
|
apache-2.0
| 5,033 |
package com.rasterfoundry.api.team
import com.rasterfoundry.akkautil.{
Authentication,
CommonHandlers,
UserErrorHandler
}
import com.rasterfoundry.database._
import com.rasterfoundry.datamodel._
import akka.http.scaladsl.server.Route
import com.rasterfoundry.akkautil.PaginationDirectives
import de.heikoseeberger.akkahttpcirce.ErrorAccumulatingCirceSupport._
import java.util.UUID
import cats.data.OptionT
import cats.effect.IO
import doobie.util.transactor.Transactor
import doobie._
import doobie.implicits._
/**
  * Routes for Teams
*/
trait TeamRoutes
extends Authentication
with PaginationDirectives
with CommonHandlers
with UserErrorHandler {
val xa: Transactor[IO]
val teamRoutes: Route = handleExceptions(userExceptionHandler) {
pathPrefix(JavaUUID) { teamId =>
get {
getTeam(teamId)
}
}
}
def getTeam(teamId: UUID): Route = authenticate { user =>
authorizeAsync {
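      // A user may view the team if they are a member of the team, a member of the team's
      // organization, or an admin of the organization's platform.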
val authIO = for {
teamMember <- OptionT.liftF[ConnectionIO, Boolean](
TeamDao.userIsMember(user, teamId))
team <- OptionT[ConnectionIO, Team](TeamDao.getTeamById(teamId))
organization <- OptionT[ConnectionIO, Organization](
OrganizationDao.getOrganizationById(team.organizationId)
)
platformAdmin <- OptionT.liftF[ConnectionIO, Boolean](
PlatformDao.userIsAdmin(user, organization.platformId))
organizationMember <- OptionT.liftF[ConnectionIO, Boolean](
OrganizationDao.userIsMember(user, organization.id))
} yield { teamMember || organizationMember || platformAdmin }
authIO.value.map(_.getOrElse(false)).transact(xa).unsafeToFuture
} {
rejectEmptyResponse {
complete {
TeamDao.getTeamById(teamId).transact(xa).unsafeToFuture
}
}
}
}
}
|
aaronxsu/raster-foundry
|
app-backend/api/src/main/scala/team/Routes.scala
|
Scala
|
apache-2.0
| 1,846 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v3
import uk.gov.hmrc.ct.box._
case class B85A(value: Option[Boolean]) extends CtBoxIdentifier("Is a repayment due for different period") with CtOptionalBoolean with Input {
}
|
hmrc/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/ct600/v3/B85A.scala
|
Scala
|
apache-2.0
| 812 |
/*
* This file is part of AckCord, licensed under the MIT License (MIT).
*
* Copyright (c) 2019 Katrix
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package ackcord.util
import akka.actor.typed.ActorSystem
import com.typesafe.config.Config
/**
 * Settings that AckCord uses. See the reference config for more info.
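 * For example (illustrative values, matching the keys read below):
 * {{{
 * ackcord.logging.payloads.log-received-ws = false
 * ackcord.logging.payloads.log-sent-ws = false
 * ackcord.logging.traces.log-json-traces = false
 * ackcord.logging.traces.only-unique-traces = true
 * ackcord.logging.traces.num-traces = 10
 * }}}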
*/
class AckCordGatewaySettings(config: Config) {
import config._
val LogReceivedWs: Boolean = getBoolean("ackcord.logging.payloads.log-received-ws")
val LogSentWs: Boolean = getBoolean("ackcord.logging.payloads.log-sent-ws")
val LogJsonTraces: Boolean = getBoolean("ackcord.logging.traces.log-json-traces")
val OnlyUniqueTraces: Boolean = getBoolean("ackcord.logging.traces.only-unique-traces")
val NumTraces: Int = getInt("ackcord.logging.traces.num-traces")
}
object AckCordGatewaySettings {
def apply()(implicit system: ActorSystem[Nothing]): AckCordGatewaySettings =
new AckCordGatewaySettings(system.settings.config)
}
|
Katrix-/AckCord
|
gateway/src/main/scala/ackcord/util/AckCordGatewaySettings.scala
|
Scala
|
mit
| 2,020 |
package im.actor.server.file
import java.io.ByteArrayOutputStream
import akka.actor.ActorSystem
import com.sksamuel.scrimage.nio.{ ImageWriter, JpegWriter, PngWriter }
import com.sksamuel.scrimage.{ Image, ParImage, Position }
import im.actor.server.acl.ACLUtils
import im.actor.server.db.DbExtension
import im.actor.server.model.AvatarData
import im.actor.server.persist.files.FileRepo
import im.actor.util.ThreadLocalSecureRandom
import slick.dbio.DBIO
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.{ Failure, Success, Try }
object ImageUtils {
val AvatarSizeLimit = 1024L * 1024 // TODO: configurable
val SmallSize = 100
val LargeSize = 200
private case class ThumbDescriptor(name: String, side: Int, writer: ImageWriter)
def avatar(ad: AvatarData) =
(ad.smallOpt, ad.largeOpt, ad.fullOpt) match {
case (None, None, None) ⇒ None
case (smallOpt, largeOpt, fullOpt) ⇒
Some(Avatar(
avatarImage(smallOpt, SmallSize, SmallSize),
avatarImage(largeOpt, LargeSize, LargeSize),
avatarImage(fullOpt)
))
}
def avatarImage(idhashsize: Option[(Long, Long, Long)], width: Int, height: Int): Option[AvatarImage] =
idhashsize map {
case (id, hash, size) ⇒ AvatarImage(FileLocation(id, hash), width, height, size)
}
def avatarImage(idhashsizewh: Option[(Long, Long, Long, Int, Int)]): Option[AvatarImage] =
idhashsizewh flatMap {
case (id, hash, size, w, h) ⇒ avatarImage(Some((id, hash, size)), w, h)
}
def resizeTo(aimg: ParImage, side: Int)(implicit ec: ExecutionContext): Future[ParImage] =
for (scaledImg ← scaleTo(aimg, side)) yield scaledImg.resizeTo(side, side, Position.Center)
def scaleTo(aimg: ParImage, side: Int)(implicit ec: ExecutionContext): Future[ParImage] = {
val scaleFactor = side.toDouble / math.min(aimg.width, aimg.height)
aimg.scale(scaleFactor)
}
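  // For example (illustrative): scaling a 400x300 image with side = 100 uses a factor of
  // 100/300, giving roughly 133x100; resizeTo then crops/pads it to 100x100 around the center.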
def dimensions(aimg: ParImage)(implicit ec: ExecutionContext): (Int, Int) =
(aimg.width, aimg.height)
def scaleStickerF(fullFileId: Long)(
implicit
fsAdapter: FileStorageAdapter,
ec: ExecutionContext,
system: ActorSystem
): Future[Either[Throwable, Avatar]] =
DbExtension(system).db.run(
scaleAvatar(
fullFileId,
ThreadLocalSecureRandom.current(),
ThumbDescriptor("small-sticker.png", 128, PngWriter()),
ThumbDescriptor("medium-sticker.png", 256, PngWriter())
)
)
def scaleAvatarF(fullFileId: Long)(
implicit
fsAdapter: FileStorageAdapter,
ec: ExecutionContext,
system: ActorSystem
): Future[Either[Throwable, Avatar]] =
DbExtension(system).db.run(scaleAvatar(fullFileId))
def scaleAvatar(fullFileId: Long)(
implicit
fsAdapter: FileStorageAdapter,
ec: ExecutionContext,
system: ActorSystem
): DBIO[Either[Throwable, Avatar]] =
scaleAvatar(fullFileId, ThreadLocalSecureRandom.current())
def scaleAvatar(
fullFileId: Long,
rng: ThreadLocalSecureRandom
)(implicit system: ActorSystem): DBIO[Either[Throwable, Avatar]] =
scaleAvatar(
fullFileId,
rng,
ThumbDescriptor("small-avatar.jpg", SmallSize, JpegWriter()),
ThumbDescriptor("large-avatar.jpg", LargeSize, JpegWriter())
)
def scaleAvatar(
fullFileId: Long,
rng: ThreadLocalSecureRandom,
smallDesc: ThumbDescriptor,
largeDesc: ThumbDescriptor
)(implicit system: ActorSystem): DBIO[Either[Throwable, Avatar]] = {
implicit val ec: ExecutionContext = system.dispatcher
val fsAdapter = FileStorageExtension(system).fsAdapter
FileRepo.find(fullFileId) flatMap {
case Some(fullFileModel) ⇒
fsAdapter.downloadFile(fullFileId) flatMap {
case Some(fullFileData) ⇒
val action = for {
fullAimg ← Future.fromTry(Try(Image(fullFileData).toPar))
(fiw, fih) = dimensions(fullAimg)
smallAimg ← resizeTo(fullAimg, smallDesc.side)
largeAimg ← resizeTo(fullAimg, largeDesc.side)
smallBaos = new ByteArrayOutputStream()
largeBaos = new ByteArrayOutputStream()
_ ← Future.fromTry(Try(smallAimg.toImage.forWriter(smallDesc.writer).write(smallBaos)))
_ ← Future.fromTry(Try(largeAimg.toImage.forWriter(largeDesc.writer).write(largeBaos)))
smallBytes = smallBaos.toByteArray
largeBytes = largeBaos.toByteArray
smallFileLocation ← fsAdapter.uploadFileF(UnsafeFileName(smallDesc.name), smallBytes)
largeFileLocation ← fsAdapter.uploadFileF(UnsafeFileName(largeDesc.name), largeBytes)
} yield {
// TODO: #perf calculate file sizes efficiently
val smallImage = AvatarImage(
smallFileLocation,
smallAimg.width,
smallAimg.height,
smallBytes.length.toLong
)
val largeImage = AvatarImage(
largeFileLocation,
largeAimg.width,
largeAimg.height,
largeBytes.length.toLong
)
val fullImage = AvatarImage(
FileLocation(fullFileId, ACLUtils.fileAccessHash(fullFileId, fullFileModel.accessSalt)),
fullAimg.width,
fullAimg.height,
fullFileData.length.toLong
)
Avatar(Some(smallImage), Some(largeImage), Some(fullImage))
}
DBIO.from(action).asTry map {
case Success(res) ⇒ Right(res)
case Failure(e) ⇒ Left(e)
}
case None ⇒ DBIO.successful(Left(new Exception("Failed to download file")))
}
case None ⇒
DBIO.successful(Left(new Exception("Cannot find file model")))
}
}
def getAvatar(avatarModel: AvatarData): Avatar = {
val smallImageOpt = avatarModel.smallOpt map {
case (fileId, fileHash, fileSize) ⇒ AvatarImage(FileLocation(fileId, fileHash), SmallSize, SmallSize, fileSize)
}
val largeImageOpt = avatarModel.largeOpt map {
case (fileId, fileHash, fileSize) ⇒ AvatarImage(FileLocation(fileId, fileHash), LargeSize, LargeSize, fileSize)
}
val fullImageOpt = avatarModel.fullOpt map {
case (fileId, fileHash, fileSize, w, h) ⇒ AvatarImage(FileLocation(fileId, fileHash), w, h, fileSize)
}
Avatar(smallImageOpt, largeImageOpt, fullImageOpt)
}
def getAvatarData(entityType: AvatarData.TypeVal, entityId: Int, avatar: Avatar): AvatarData = {
AvatarData(
entityType = entityType,
entityId = entityId.toLong,
smallAvatarFileId = avatar.smallImage map (_.fileLocation.fileId),
smallAvatarFileHash = avatar.smallImage map (_.fileLocation.accessHash),
smallAvatarFileSize = avatar.smallImage map (_.fileSize),
largeAvatarFileId = avatar.largeImage map (_.fileLocation.fileId),
largeAvatarFileHash = avatar.largeImage map (_.fileLocation.accessHash),
largeAvatarFileSize = avatar.largeImage map (_.fileSize),
fullAvatarFileId = avatar.fullImage map (_.fileLocation.fileId),
fullAvatarFileHash = avatar.fullImage map (_.fileLocation.accessHash),
fullAvatarFileSize = avatar.fullImage map (_.fileSize),
fullAvatarWidth = avatar.fullImage map (_.width),
fullAvatarHeight = avatar.fullImage map (_.height)
)
}
}
|
ljshj/actor-platform
|
actor-server/actor-core/src/main/scala/im/actor/server/file/ImageUtils.scala
|
Scala
|
mit
| 7,502 |
/*
Copyright 2014 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.thrift.macros.impl.ordered_serialization
import com.twitter.scalding.serialization.macros.impl.ordered_serialization._
import com.twitter.scrooge.ThriftEnum
import scala.language.experimental.macros
import scala.reflect.macros.Context
object ScroogeEnumOrderedBuf {
def dispatch(c: Context): PartialFunction[c.Type, TreeOrderedBuf[c.type]] = {
import c.universe._
val pf: PartialFunction[c.Type, TreeOrderedBuf[c.type]] = {
case tpe if tpe <:< typeOf[ThriftEnum] => ScroogeEnumOrderedBuf(c)(tpe)
}
pf
}
def apply(c: Context)(outerType: c.Type): TreeOrderedBuf[c.type] = {
import c.universe._
def freshT(id: String) = newTermName(c.fresh(s"fresh_$id"))
new TreeOrderedBuf[c.type] {
override val ctx: c.type = c
override val tpe = outerType
override def compareBinary(inputStreamA: ctx.TermName, inputStreamB: ctx.TermName) =
q"""
_root_.java.lang.Integer.compare($inputStreamA.readPosVarInt, $inputStreamB.readPosVarInt)
"""
override def hash(element: ctx.TermName): ctx.Tree =
q"_root_.com.twitter.scalding.serialization.Hasher.int.hash($element.value)"
override def put(inputStream: ctx.TermName, element: ctx.TermName) =
q"$inputStream.writePosVarInt($element.value)"
override def get(inputStream: ctx.TermName): ctx.Tree =
q"${outerType.typeSymbol.companionSymbol}.apply($inputStream.readPosVarInt)"
override def compare(elementA: ctx.TermName, elementB: ctx.TermName): ctx.Tree =
q"""
_root_.java.lang.Integer.compare($elementA.value, $elementB.value) : Int
"""
override def length(element: Tree): CompileTimeLengthTypes[c.type] = CompileTimeLengthTypes.FastLengthCalculation(c)(q"posVarIntSize($element.value)")
override val lazyOuterVariables: Map[String, ctx.Tree] = Map.empty
}
}
}
|
benpence/scalding
|
scalding-thrift-macros/src/main/scala/com/twitter/scalding/thrift/macros/impl/ordered_serialization/ScroogeEnumOrderedBuf.scala
|
Scala
|
apache-2.0
| 2,469 |
package com.datastax.spark.connector.util
case class ConfigParameter[T](
val name: String,
val section: String,
val default: T,
val description: String) extends DataFrameOption {
override val sqlOptionName = name.replaceAll("\\\\.", "\\\\_")
def option(value: Any): Map[String, String] = {
require(value != null)
Map(name -> value.toString)
}
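  // A minimal usage sketch; the name, section and default below are illustrative,
  // not actual connector settings.
  //
  //   val splitSize = ConfigParameter[Int](
  //     name = "input.split.size",
  //     section = "Read Tuning",
  //     default = 64,
  //     description = "Approximate size of a single Spark partition")
  //   splitSize.option(128)    // Map("input.split.size" -> "128")
  //   splitSize.sqlOptionName  // same name with dots rewritten for DataFrame options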
}
|
maasg/spark-cassandra-connector
|
spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/util/ConfigParameter.scala
|
Scala
|
apache-2.0
| 367 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher
import java.io.File
import java.util.{HashMap => JHashMap, List => JList, Map => JMap}
import scala.collection.JavaConverters._
import org.apache.spark.deploy.Command
/**
 * This class is used by CommandUtils. It relies on some package-private APIs in SparkLauncher;
 * since Java has no equivalent of `private[spark]` and we don't want that class to be
 * public, it needs to live in the same package as the rest of the library.
*/
private[spark] class WorkerCommandBuilder(sparkHome: String, memoryMb: Int, command: Command)
extends AbstractCommandBuilder {
childEnv.putAll(command.environment.asJava)
childEnv.put(CommandBuilderUtils.ENV_SPARK_HOME, sparkHome)
override def buildCommand(env: JMap[String, String]): JList[String] = {
val cmd = buildJavaCommand(command.classPathEntries.mkString(File.pathSeparator))
cmd.add(s"-Xms${memoryMb}M")
cmd.add(s"-Xmx${memoryMb}M")
command.javaOpts.foreach(cmd.add)
CommandBuilderUtils.addPermGenSizeOpt(cmd)
addOptionString(cmd, getenv("SPARK_JAVA_OPTS"))
cmd
}
def buildCommand(): JList[String] = buildCommand(new JHashMap[String, String]())
}
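// A minimal sketch of how this builder might be driven; the Command fields below follow
// the shape of org.apache.spark.deploy.Command, but the concrete values are hypothetical.
//
//   val command = Command(
//     mainClass = "org.apache.spark.executor.CoarseGrainedExecutorBackend",
//     arguments = Seq("--driver-url", "spark://..."),
//     environment = Map.empty,
//     classPathEntries = Seq.empty,
//     libraryPathEntries = Seq.empty,
//     javaOpts = Seq("-Dspark.test=true"))
//   val cmd = new WorkerCommandBuilder("/opt/spark", memoryMb = 1024, command).buildCommand()
//   // cmd starts with the java binary and includes -Xms1024M / -Xmx1024M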
|
chenc10/Spark-PAF
|
core/src/main/scala/org/apache/spark/launcher/WorkerCommandBuilder.scala
|
Scala
|
apache-2.0
| 1,987 |
package org.jetbrains.plugins.scala.annotator.gutter
/**
* Pavel.Fatin, 21.01.2010
*/
class GroupTest extends LineMarkerTestBase {
protected override def getBasePath = super.getBasePath + "/group/"
def testSolid(): Unit = doTest()
def testSeparated(): Unit = doTest()
def testMixed(): Unit = doTest()
def testMixedLine(): Unit = doTest()
def testStatement(): Unit = doTest()
}
|
JetBrains/intellij-scala
|
scala/scala-impl/test/org/jetbrains/plugins/scala/annotator/gutter/GroupTest.scala
|
Scala
|
apache-2.0
| 393 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.sql.catalyst.util.resourceToString
/**
* This test suite ensures all the TPC-H queries can be successfully analyzed, optimized
* and compiled without hitting the max iteration threshold.
*/
class TPCHQuerySuite extends BenchmarkQueryTest {
override def beforeAll() {
super.beforeAll()
sql(
"""
|CREATE TABLE `orders` (
|`o_orderkey` BIGINT, `o_custkey` BIGINT, `o_orderstatus` STRING,
|`o_totalprice` DECIMAL(10,0), `o_orderdate` DATE, `o_orderpriority` STRING,
|`o_clerk` STRING, `o_shippriority` INT, `o_comment` STRING)
|USING parquet
""".stripMargin)
sql(
"""
|CREATE TABLE `nation` (
|`n_nationkey` BIGINT, `n_name` STRING, `n_regionkey` BIGINT, `n_comment` STRING)
|USING parquet
""".stripMargin)
sql(
"""
|CREATE TABLE `region` (
|`r_regionkey` BIGINT, `r_name` STRING, `r_comment` STRING)
|USING parquet
""".stripMargin)
sql(
"""
|CREATE TABLE `part` (`p_partkey` BIGINT, `p_name` STRING, `p_mfgr` STRING,
|`p_brand` STRING, `p_type` STRING, `p_size` INT, `p_container` STRING,
|`p_retailprice` DECIMAL(10,0), `p_comment` STRING)
|USING parquet
""".stripMargin)
sql(
"""
|CREATE TABLE `partsupp` (`ps_partkey` BIGINT, `ps_suppkey` BIGINT,
|`ps_availqty` INT, `ps_supplycost` DECIMAL(10,0), `ps_comment` STRING)
|USING parquet
""".stripMargin)
sql(
"""
|CREATE TABLE `customer` (`c_custkey` BIGINT, `c_name` STRING, `c_address` STRING,
|`c_nationkey` BIGINT, `c_phone` STRING, `c_acctbal` DECIMAL(10,0),
|`c_mktsegment` STRING, `c_comment` STRING)
|USING parquet
""".stripMargin)
sql(
"""
|CREATE TABLE `supplier` (`s_suppkey` BIGINT, `s_name` STRING, `s_address` STRING,
|`s_nationkey` BIGINT, `s_phone` STRING, `s_acctbal` DECIMAL(10,0), `s_comment` STRING)
|USING parquet
""".stripMargin)
sql(
"""
|CREATE TABLE `lineitem` (`l_orderkey` BIGINT, `l_partkey` BIGINT, `l_suppkey` BIGINT,
|`l_linenumber` INT, `l_quantity` DECIMAL(10,0), `l_extendedprice` DECIMAL(10,0),
|`l_discount` DECIMAL(10,0), `l_tax` DECIMAL(10,0), `l_returnflag` STRING,
|`l_linestatus` STRING, `l_shipdate` DATE, `l_commitdate` DATE, `l_receiptdate` DATE,
|`l_shipinstruct` STRING, `l_shipmode` STRING, `l_comment` STRING)
|USING parquet
""".stripMargin)
}
val tpchQueries = Seq(
"q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11",
"q12", "q13", "q14", "q15", "q16", "q17", "q18", "q19", "q20", "q21", "q22")
tpchQueries.foreach { name =>
val queryString = resourceToString(s"tpch/$name.sql",
classLoader = Thread.currentThread().getContextClassLoader)
test(name) {
// check the plans can be properly generated
val plan = sql(queryString).queryExecution.executedPlan
checkGeneratedCode(plan)
}
}
}
|
pgandhi999/spark
|
sql/core/src/test/scala/org/apache/spark/sql/TPCHQuerySuite.scala
|
Scala
|
apache-2.0
| 3,901 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.serving.core.models.policy.writer
import com.stratio.sparta.serving.core.models.policy.cube.FieldModel
case class FromPkFieldsModel(field: FieldModel)
|
diegohurtado/sparta
|
serving-core/src/main/scala/com/stratio/sparta/serving/core/models/policy/writer/FromPkFieldsModel.scala
|
Scala
|
apache-2.0
| 801 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.magic.builtin
import java.io.PrintStream
import com.google.common.base.Strings
import org.apache.toree.kernel.protocol.v5.MIMEType
import org.apache.toree.magic._
import org.apache.toree.magic.dependencies.IncludeOutputStream
import org.apache.toree.utils.ArgumentParsingSupport
import org.slf4j.LoggerFactory
import org.apache.toree.plugins.annotations.Event
class JavaScript extends CellMagic with ArgumentParsingSupport
with IncludeOutputStream {
// Lazy because the outputStream is not provided at construction
private def printStream = new PrintStream(outputStream)
@Event(name = "javascript")
override def execute(code: String): CellMagicOutput = {
def printHelpAndReturn: CellMagicOutput = {
printHelp(printStream, """%JavaScript <string_code>""")
CellMagicOutput()
}
    if (Strings.isNullOrEmpty(code)) printHelpAndReturn
    else CellMagicOutput(MIMEType.ApplicationJavaScript -> code)
}
}
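// A minimal sketch of the expected behaviour; the instance is normally created and wired
// with an output stream by the Toree plugin framework rather than constructed by hand.
//
//   execute("console.log('hi')")
//     // => CellMagicOutput(MIMEType.ApplicationJavaScript -> "console.log('hi')")
//   execute("")
//     // => prints the usage help and returns an empty CellMagicOutput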
|
chipsenkbeil/incubator-toree
|
kernel/src/main/scala/org/apache/toree/magic/builtin/JavaScript.scala
|
Scala
|
apache-2.0
| 1,818 |
package org.qcri.rheem.spark.operators.graph
import java.lang.{Long => JavaLong}
import java.util
import java.util.Collections
import org.apache.spark.graphx.Graph
import org.apache.spark.graphx.lib.PageRank
import org.qcri.rheem.basic.data.{Tuple2 => T2}
import org.qcri.rheem.basic.operators.PageRankOperator
import org.qcri.rheem.core.optimizer.costs.LoadProfileEstimators
import org.qcri.rheem.core.optimizer.{OptimizationContext, ProbabilisticDoubleInterval}
import org.qcri.rheem.core.platform.lineage.ExecutionLineageNode
import org.qcri.rheem.core.platform.{ChannelDescriptor, ChannelInstance}
import org.qcri.rheem.spark.channels.RddChannel
import org.qcri.rheem.spark.execution.SparkExecutor
import org.qcri.rheem.spark.operators.SparkExecutionOperator
/**
* GraphX-based implementation of the [[PageRankOperator]].
*/
class SparkPageRankOperator(_numIterations: Int,
_dampingFactor: Float,
_graphDensity: ProbabilisticDoubleInterval)
extends PageRankOperator(_numIterations, _dampingFactor, _graphDensity) with SparkExecutionOperator {
def this(that: PageRankOperator) = this(that.getNumIterations, that.getDampingFactor, that.getGraphDensity)
override def evaluate(inputs: Array[ChannelInstance],
outputs: Array[ChannelInstance],
sparkExecutor: SparkExecutor,
operatorContext: OptimizationContext#OperatorContext) = {
val input = inputs(0).asInstanceOf[RddChannel#Instance]
val output = outputs(0).asInstanceOf[RddChannel#Instance]
val edgeRdd = input.provideRdd[T2[JavaLong, JavaLong]]().rdd
.map(edge => (edge.field0.longValue, edge.field1.longValue))
val graph = Graph.fromEdgeTuples(edgeRdd, null)
val prGraph = PageRank.run(graph, this.numIterations, 1d - this.dampingFactor)
val resultRdd = prGraph.vertices
.map { case (vertexId, pageRank) => new T2(vertexId, pageRank.toFloat) }
.toJavaRDD
output.accept(resultRdd, sparkExecutor)
val mainExecutionLineageNode = new ExecutionLineageNode(operatorContext)
mainExecutionLineageNode.add(LoadProfileEstimators.createFromSpecification(
"rheem.spark.pagerank.load.main", sparkExecutor.getConfiguration
))
mainExecutionLineageNode.addPredecessor(input.getLineage)
val outputExecutionLineageNode = new ExecutionLineageNode(operatorContext)
outputExecutionLineageNode.add(LoadProfileEstimators.createFromSpecification(
"rheem.spark.pagerank.load.output", sparkExecutor.getConfiguration
))
output.getLineage.addPredecessor(outputExecutionLineageNode)
mainExecutionLineageNode.collectAndMark()
}
override def getLoadProfileEstimatorConfigurationKeys: java.util.Collection[String] =
java.util.Arrays.asList("rheem.spark.pagerank.load.main", "rheem.spark.pagerank.load.output")
override def getSupportedInputChannels(index: Int): util.List[ChannelDescriptor] = {
assert(index == 0)
Collections.singletonList(RddChannel.CACHED_DESCRIPTOR)
}
override def getSupportedOutputChannels(index: Int): util.List[ChannelDescriptor] = {
assert(index == 0)
Collections.singletonList(RddChannel.UNCACHED_DESCRIPTOR)
}
override def containsAction(): Boolean = true
}
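// A minimal sketch of the underlying GraphX calls outside the Rheem channel plumbing;
// `sc` is assumed to be an existing SparkContext and the edge list is illustrative.
//
//   import org.apache.spark.graphx.Graph
//   import org.apache.spark.graphx.lib.PageRank
//
//   val edges  = sc.parallelize(Seq((1L, 2L), (2L, 3L), (3L, 1L)))
//   val graph  = Graph.fromEdgeTuples(edges, null)
//   // GraphX expresses a damping factor d through resetProb = 1 - d, which is why the
//   // operator above passes `1d - this.dampingFactor`.
//   val ranked = PageRank.run(graph, numIter = 10, resetProb = 1 - 0.85)
//   val ranks  = ranked.vertices // RDD[(VertexId, Double)]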
|
jonasrk/rheem
|
rheem-platforms/rheem-spark/src/main/scala/org/qcri/rheem/spark/operators/graph/SparkPageRankOperator.scala
|
Scala
|
apache-2.0
| 3,288 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.accounts
import org.mockito.Mockito._
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.ct.accounts.MockFrs10xAccountsRetriever
import uk.gov.hmrc.ct.accounts.frs10x.boxes._
import uk.gov.hmrc.ct.box.CtValidation
import uk.gov.hmrc.ct.{CompaniesHouseFiling, HMRCFiling, MicroEntityFiling, StatutoryAccountsFiling}
class AC8023Spec extends WordSpec with MockitoSugar with Matchers with MockFrs10xAccountsRetriever {
"AC8023 validate" should {
"for HMRC Only micro entity filing" when {
"return errors when AC8023 is empty" in {
when(filingAttributesBoxValueRetriever.hmrcFiling()).thenReturn(HMRCFiling(true))
when(filingAttributesBoxValueRetriever.companiesHouseFiling()).thenReturn(CompaniesHouseFiling(false))
when(filingAttributesBoxValueRetriever.microEntityFiling()).thenReturn(MicroEntityFiling(true))
when(filingAttributesBoxValueRetriever.statutoryAccountsFiling()).thenReturn(StatutoryAccountsFiling(false))
AC8023(None).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC8023"), "error.AC8023.required"))
}
"validate successfully when AC8023 is true" in {
when(filingAttributesBoxValueRetriever.hmrcFiling()).thenReturn(HMRCFiling(true))
when(filingAttributesBoxValueRetriever.companiesHouseFiling()).thenReturn(CompaniesHouseFiling(false))
when(filingAttributesBoxValueRetriever.microEntityFiling()).thenReturn(MicroEntityFiling(true))
when(filingAttributesBoxValueRetriever.statutoryAccountsFiling()).thenReturn(StatutoryAccountsFiling(false))
AC8023(Some(true)).validate(boxRetriever) shouldBe Set()
}
"validate successfully when AC8023 is false" in {
when(filingAttributesBoxValueRetriever.hmrcFiling()).thenReturn(HMRCFiling(true))
when(filingAttributesBoxValueRetriever.companiesHouseFiling()).thenReturn(CompaniesHouseFiling(false))
when(filingAttributesBoxValueRetriever.microEntityFiling()).thenReturn(MicroEntityFiling(true))
when(filingAttributesBoxValueRetriever.statutoryAccountsFiling()).thenReturn(StatutoryAccountsFiling(false))
AC8023(Some(false)).validate(boxRetriever) shouldBe Set()
}
}
"for Joint micro entity filing" when {
"return errors when AC8023 is empty" in {
when(filingAttributesBoxValueRetriever.hmrcFiling()).thenReturn(HMRCFiling(true))
when(filingAttributesBoxValueRetriever.companiesHouseFiling()).thenReturn(CompaniesHouseFiling(true))
when(filingAttributesBoxValueRetriever.microEntityFiling()).thenReturn(MicroEntityFiling(true))
when(filingAttributesBoxValueRetriever.statutoryAccountsFiling()).thenReturn(StatutoryAccountsFiling(false))
AC8023(None).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC8023"), "error.AC8023.required"))
}
"validate successfully when AC8023 is true" in {
when(filingAttributesBoxValueRetriever.hmrcFiling()).thenReturn(HMRCFiling(true))
when(filingAttributesBoxValueRetriever.companiesHouseFiling()).thenReturn(CompaniesHouseFiling(true))
when(filingAttributesBoxValueRetriever.microEntityFiling()).thenReturn(MicroEntityFiling(true))
when(filingAttributesBoxValueRetriever.statutoryAccountsFiling()).thenReturn(StatutoryAccountsFiling(false))
AC8023(Some(true)).validate(boxRetriever) shouldBe Set()
}
"validate successfully when AC8023 is false" in {
when(filingAttributesBoxValueRetriever.hmrcFiling()).thenReturn(HMRCFiling(true))
when(filingAttributesBoxValueRetriever.companiesHouseFiling()).thenReturn(CompaniesHouseFiling(true))
when(filingAttributesBoxValueRetriever.microEntityFiling()).thenReturn(MicroEntityFiling(true))
when(filingAttributesBoxValueRetriever.statutoryAccountsFiling()).thenReturn(StatutoryAccountsFiling(false))
AC8023(Some(false)).validate(boxRetriever) shouldBe Set()
}
}
"for CoHo Only micro entity filing" when {
"validate successfully when AC8023 is empty" in {
when(filingAttributesBoxValueRetriever.hmrcFiling()).thenReturn(HMRCFiling(false))
when(filingAttributesBoxValueRetriever.companiesHouseFiling()).thenReturn(CompaniesHouseFiling(true))
when(filingAttributesBoxValueRetriever.microEntityFiling()).thenReturn(MicroEntityFiling(true))
when(filingAttributesBoxValueRetriever.statutoryAccountsFiling()).thenReturn(StatutoryAccountsFiling(false))
AC8023(None).validate(boxRetriever) shouldBe Set()
}
"cannot exist when AC8023 is true" in {
when(filingAttributesBoxValueRetriever.hmrcFiling()).thenReturn(HMRCFiling(false))
when(filingAttributesBoxValueRetriever.companiesHouseFiling()).thenReturn(CompaniesHouseFiling(true))
when(filingAttributesBoxValueRetriever.microEntityFiling()).thenReturn(MicroEntityFiling(true))
when(filingAttributesBoxValueRetriever.statutoryAccountsFiling()).thenReturn(StatutoryAccountsFiling(false))
AC8023(Some(true)).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC8023"), "error.AC8023.cannot.exist"))
}
"cannot exist when AC8023 is false" in {
when(filingAttributesBoxValueRetriever.hmrcFiling()).thenReturn(HMRCFiling(false))
when(filingAttributesBoxValueRetriever.companiesHouseFiling()).thenReturn(CompaniesHouseFiling(true))
when(filingAttributesBoxValueRetriever.microEntityFiling()).thenReturn(MicroEntityFiling(true))
when(filingAttributesBoxValueRetriever.statutoryAccountsFiling()).thenReturn(StatutoryAccountsFiling(false))
AC8023(Some(false)).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC8023"), "error.AC8023.cannot.exist"))
}
}
"for HMRC Only statutory filing" when {
"validate successfully when AC8023 is empty" in {
when(filingAttributesBoxValueRetriever.hmrcFiling()).thenReturn(HMRCFiling(true))
when(filingAttributesBoxValueRetriever.companiesHouseFiling()).thenReturn(CompaniesHouseFiling(false))
when(filingAttributesBoxValueRetriever.microEntityFiling()).thenReturn(MicroEntityFiling(false))
when(filingAttributesBoxValueRetriever.statutoryAccountsFiling()).thenReturn(StatutoryAccountsFiling(true))
AC8023(None).validate(boxRetriever) shouldBe Set()
}
"validate successfully when AC8023 is true" in {
when(filingAttributesBoxValueRetriever.hmrcFiling()).thenReturn(HMRCFiling(true))
when(filingAttributesBoxValueRetriever.companiesHouseFiling()).thenReturn(CompaniesHouseFiling(false))
when(filingAttributesBoxValueRetriever.microEntityFiling()).thenReturn(MicroEntityFiling(false))
when(filingAttributesBoxValueRetriever.statutoryAccountsFiling()).thenReturn(StatutoryAccountsFiling(true))
AC8023(Some(true)).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC8023"), "error.AC8023.cannot.exist"))
}
"validate successfully when AC8023 is false" in {
when(filingAttributesBoxValueRetriever.hmrcFiling()).thenReturn(HMRCFiling(true))
when(filingAttributesBoxValueRetriever.companiesHouseFiling()).thenReturn(CompaniesHouseFiling(false))
when(filingAttributesBoxValueRetriever.microEntityFiling()).thenReturn(MicroEntityFiling(false))
when(filingAttributesBoxValueRetriever.statutoryAccountsFiling()).thenReturn(StatutoryAccountsFiling(true))
AC8023(Some(false)).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC8023"), "error.AC8023.cannot.exist"))
}
}
"for Joint statutory filing" when {
"return errors when AC8023 is empty" in {
when(filingAttributesBoxValueRetriever.hmrcFiling()).thenReturn(HMRCFiling(true))
when(filingAttributesBoxValueRetriever.companiesHouseFiling()).thenReturn(CompaniesHouseFiling(true))
when(filingAttributesBoxValueRetriever.microEntityFiling()).thenReturn(MicroEntityFiling(false))
when(filingAttributesBoxValueRetriever.statutoryAccountsFiling()).thenReturn(StatutoryAccountsFiling(true))
AC8023(None).validate(boxRetriever) shouldBe Set()
}
"validate successfully when AC8023 is true" in {
when(filingAttributesBoxValueRetriever.hmrcFiling()).thenReturn(HMRCFiling(true))
when(filingAttributesBoxValueRetriever.companiesHouseFiling()).thenReturn(CompaniesHouseFiling(true))
when(filingAttributesBoxValueRetriever.microEntityFiling()).thenReturn(MicroEntityFiling(false))
when(filingAttributesBoxValueRetriever.statutoryAccountsFiling()).thenReturn(StatutoryAccountsFiling(true))
AC8023(Some(true)).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC8023"), "error.AC8023.cannot.exist"))
}
"validate successfully when AC8023 is false" in {
when(filingAttributesBoxValueRetriever.hmrcFiling()).thenReturn(HMRCFiling(true))
when(filingAttributesBoxValueRetriever.companiesHouseFiling()).thenReturn(CompaniesHouseFiling(true))
when(filingAttributesBoxValueRetriever.microEntityFiling()).thenReturn(MicroEntityFiling(false))
when(filingAttributesBoxValueRetriever.statutoryAccountsFiling()).thenReturn(StatutoryAccountsFiling(true))
AC8023(Some(false)).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC8023"), "error.AC8023.cannot.exist"))
}
}
"for CoHo Only statutory filing" when {
"validate successfully when AC8023 is empty" in {
when(filingAttributesBoxValueRetriever.hmrcFiling()).thenReturn(HMRCFiling(false))
when(filingAttributesBoxValueRetriever.companiesHouseFiling()).thenReturn(CompaniesHouseFiling(true))
when(filingAttributesBoxValueRetriever.microEntityFiling()).thenReturn(MicroEntityFiling(false))
when(filingAttributesBoxValueRetriever.statutoryAccountsFiling()).thenReturn(StatutoryAccountsFiling(true))
AC8023(None).validate(boxRetriever) shouldBe Set()
}
"cannot exist when AC8023 is true" in {
when(filingAttributesBoxValueRetriever.hmrcFiling()).thenReturn(HMRCFiling(false))
when(filingAttributesBoxValueRetriever.companiesHouseFiling()).thenReturn(CompaniesHouseFiling(true))
when(filingAttributesBoxValueRetriever.microEntityFiling()).thenReturn(MicroEntityFiling(false))
when(filingAttributesBoxValueRetriever.statutoryAccountsFiling()).thenReturn(StatutoryAccountsFiling(true))
AC8023(Some(true)).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC8023"), "error.AC8023.cannot.exist"))
}
"cannot exist when AC8023 is false" in {
when(filingAttributesBoxValueRetriever.hmrcFiling()).thenReturn(HMRCFiling(false))
when(filingAttributesBoxValueRetriever.companiesHouseFiling()).thenReturn(CompaniesHouseFiling(true))
when(filingAttributesBoxValueRetriever.microEntityFiling()).thenReturn(MicroEntityFiling(false))
when(filingAttributesBoxValueRetriever.statutoryAccountsFiling()).thenReturn(StatutoryAccountsFiling(true))
AC8023(Some(false)).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC8023"), "error.AC8023.cannot.exist"))
}
}
}
}
|
liquidarmour/ct-calculations
|
src/test/scala/uk/gov/hmrc/ct/ct600/accounts/AC8023Spec.scala
|
Scala
|
apache-2.0
| 11,864 |
package coder.simon.types.free
import scalaz._
import Scalaz._
object M3 extends App {
case class Logger[LOG, A](log: LOG, value: A) {
def map[B](f: A => B): Logger[LOG, B] = Logger(log, f(value))
def flatMap[B](f: A => Logger[LOG, B])(implicit m: Monoid[LOG]) = {
val n = f(value)
Logger(log |+| n.log, n.value)
}
}
object Logger {
implicit def toLogger[LOG](implicit m: Monoid[LOG]) = new Monad[({ type L[X] = Logger[LOG, X] })#L] {
def point[A](a: => A) = Logger(m.zero, a)
def bind[A, B](la: Logger[LOG, A])(f: A => Logger[LOG, B]) = la.flatMap(f)
}
}
final implicit class LoggerOps[A](a: A) {
def addLog[LOG](log: LOG): Logger[LOG, A] = Logger(log, a)
}
// def enterInt(x: Int) = Logger(s"Enter Int:$x", x)
// def enterStr(str: String) = Logger(s"Enter String:$str", str)
//
// val r = for {
// a <- enterInt(3)
// b <- enterStr("4")
// } yield a + b
//
// println(r)
val r = for {
a <- 3.addLog("Enter Int 3,")
b <- "4".addLog("Enter String 4")
} yield a + b
println(r)
val x = 3 set Vector("Enter Int 3")
val y = "wokao".tell
def gcd(a: Int, b: Int): Writer[Vector[String], Int] = b match {
case 0 =>
Vector(s"Finish at $a").tell map { _ => a }
case _ =>
Vector(s"$a mod $b = ${a % b}").tell >>= { _ => gcd(b, a % b) }
}
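  // A worked trace of the Writer-based gcd above: gcd(8, 3) accumulates the log
  //   Vector("8 mod 3 = 2", "3 mod 2 = 1", "2 mod 1 = 0", "Finish at 1")
  // alongside the result 1, which is what the println below renders.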
println(gcd(8, 3))
}
|
erlangxk/fpscala
|
src/main/scala/coder/simon/types/free/M3.scala
|
Scala
|
mit
| 1,393 |
package com.openquant.quoter.common
import java.io.InputStreamReader
import com.openquant.quoter.utils
import org.joda.time.format.ISODateTimeFormat
/**
* @author piotr 31.05.15
*/
class FutureList {
object FutureContract extends Enumeration {
type FutureContract = Value
val LightCrudeOil = Value
val BrentOil = Value
}
import FutureContract._
private def getLightCrudeOilContracts(): Map[String, Future] = {
val is = getClass.getResourceAsStream("/cl.csv")
    if (is == null)
      return Map.empty[String, Future]
import com.github.tototoshi.csv._
val rdr = CSVReader.open(new InputStreamReader(is))
def parseCSVLine(x: List[String]): (String, Future) = {
val df = ISODateTimeFormat.dateHourMinuteSecond()
val d = df.parseDateTime(x(3)).toDate()
(x(0) → new Future("CL", "NYMEX", "USD", Some(d)))
}
rdr.toStream.drop(1).map(parseCSVLine).toMap
}
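  // A sketch of the row shape this parser assumes (the concrete values are hypothetical):
  // column 0 is the contract symbol and column 3 an ISO yyyy-MM-dd'T'HH:mm:ss expiry, e.g.
  //
  //   CLF6,<...>,<...>,2016-01-20T00:00:00
  //
  // which would map to "CLF6" -> new Future("CL", "NYMEX", "USD", Some(<20 Jan 2016>)).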
def getContracts(futureContract: FutureContract.Value): Map[String, Future] = futureContract match {
case LightCrudeOil ⇒ getLightCrudeOilContracts()
case _ ⇒ throw new RuntimeException(s"Contract ${futureContract} not supported")
}
}
|
openquant/quoter
|
src/main/scala/com/larroy/quoter/common/FutureList.scala
|
Scala
|
lgpl-3.0
| 1,170 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.metrics.reporters
import java.io.File
import java.nio.file.Files
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicLong
import com.codahale.metrics._
import org.apache.commons.io.FileUtils
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.io.Source
@RunWith(classOf[JUnitRunner])
class DelimitedFileReporterTest extends Specification {
sequential
val registry = new MetricRegistry()
val folder = Files.createTempDirectory("geomesa-metrics").toFile
val reporter = DelimitedFileReporter.forRegistry(registry)
.aggregate(true)
.withTabs()
.convertDurationsTo(TimeUnit.MILLISECONDS)
.convertRatesTo(TimeUnit.SECONDS)
.build(folder.getAbsolutePath)
def read(metric: String, name: String): Seq[Array[String]] = {
val source = Source.fromFile(new File(folder, s"$metric.tsv"))
try {
source.getLines().toList.map(_.split("\\t")).filter(_.apply(1) == name).map(_.drop(2)) // drop name and timestamp
} finally {
source.close()
}
}
"DelimitedFileReporter" should {
"report gauges" >> {
val name = "mygauge"
registry.register(name, new Gauge[String] {
override def getValue: String = "value1"
})
reporter.report()
reporter.flush()
registry.remove(name)
val entries = read("gauges", name)
entries must haveLength(1)
entries.head must haveLength(1)
entries.head(0) mustEqual "value1"
}
"report counters" >> {
val name = "mycounter"
val metric = registry.counter(name)
metric.inc(10)
reporter.report()
reporter.flush()
registry.remove(name)
val entries = read("counters", name)
entries must haveLength(1)
entries.head must haveLength(1)
entries.head(0) mustEqual "10"
}
"report histograms" >> {
val name = "myhistogram"
val metric = registry.histogram(name)
(0 until 10).foreach(metric.update)
reporter.report()
reporter.flush()
registry.remove(name)
val entries = read("histograms", name)
entries must haveLength(1)
entries.head must haveLength(11)
entries.head(0).toDouble mustEqual 10.0 // count
entries.head(1).toDouble mustEqual 0.0 // min
entries.head(2).toDouble mustEqual 9.0 // max
entries.head(3).toDouble mustEqual 4.5 // mean
entries.head(4).toDouble mustEqual 2.87 // std dev
entries.head(5).toDouble mustEqual 5.0 // median
entries.head(6).toDouble mustEqual 7.0 // 75th
entries.head(7).toDouble mustEqual 9.0 // 95th
entries.head(8).toDouble mustEqual 9.0 // 98th
entries.head(9).toDouble mustEqual 9.0 // 99th
entries.head(10).toDouble mustEqual 9.0 // 999th
}
"report meters" >> {
val name = "mymeter"
val tick = new AtomicLong(0)
      val clock = new Clock { override def getTick: Long = tick.get * 1000 } // getTick is in nanos - each tick unit here is a microsecond
val metric = registry.register(name, new Meter(clock))
(0 until 10).foreach { i => tick.addAndGet(i); metric.mark() }
reporter.report()
reporter.flush()
registry.remove(name)
val entries = read("meters", name)
entries must haveLength(1)
entries.head must haveLength(6)
entries.head(0).toDouble mustEqual 10.0
entries.head(1).toDouble mustEqual 222222.22
entries.head(2).toDouble mustEqual 0.0
entries.head(3).toDouble mustEqual 0.0
entries.head(4).toDouble mustEqual 0.0
entries.head(5) mustEqual "events/second"
}
"report timers" >> {
val name = "mytimer"
val tick = new AtomicLong(0)
      val clock = new Clock { override def getTick: Long = tick.get * 1000000 } // getTick is in nanos - each tick unit here is a millisecond
val metric = registry.register(name, new Timer(new SlidingWindowReservoir(100), clock))
(0 until 10).foreach { i =>
tick.addAndGet(i)
val c = metric.time()
tick.addAndGet(i)
c.stop()
}
reporter.report()
reporter.flush()
registry.remove(name)
val entries = read("timers", name)
entries must haveLength(1)
entries.head must haveLength(17)
entries.head(0).toDouble mustEqual 10.0
entries.head(1).toDouble mustEqual 0.0
entries.head(2).toDouble mustEqual 9.0
entries.head(3).toDouble mustEqual 4.5
entries.head(4).toDouble mustEqual 3.03
entries.head(5).toDouble mustEqual 4.5
entries.head(6).toDouble mustEqual 7.25
entries.head(7).toDouble mustEqual 9.0
entries.head(8).toDouble mustEqual 9.0
entries.head(9).toDouble mustEqual 9.0
entries.head(10).toDouble mustEqual 9.0
entries.head(11).toDouble mustEqual 111.11
entries.head(12).toDouble mustEqual 0.0
entries.head(13).toDouble mustEqual 0.0
entries.head(14).toDouble mustEqual 0.0
entries.head(15) mustEqual "calls/second"
entries.head(16) mustEqual "milliseconds"
}
}
step {
reporter.stop()
FileUtils.deleteDirectory(folder) // recursive delete of contents
}
}
|
nagavallia/geomesa
|
geomesa-metrics/src/test/scala/org/locationtech/geomesa/metrics/reporters/DelimitedFileReporterTest.scala
|
Scala
|
apache-2.0
| 5,664 |
package org.jetbrains.plugins.scala
package annotator
import org.jetbrains.plugins.scala.base.libraryLoaders.{LibraryLoader, ScalaReflectLibraryLoader}
class MacroDefAnnotatorTest extends ScalaHighlightingTestBase {
override protected def supportedIn(version: ScalaVersion): Boolean = version >= LatestScalaVersions.Scala_2_13
override def librariesLoaders: Seq[LibraryLoader] =
super.librariesLoaders :+ ScalaReflectLibraryLoader
private def doTest(text: String)(expectedErrors: Message*): Unit = {
val errors = errorsFromScalaCode(text)
assertMessages(errors)(expectedErrors: _*)
}
def testMacroDef(): Unit = doTest(
"""
|object Test {
| import scala.reflect.macros._
| def helloWorld = macro helloImpl
|
| def helloImpl(c: blackbox.Context): c.Expr[Unit] = { ??? }
|}
|""".stripMargin)(Error("helloWorld", "Macro defs must have explicitly specified return types"))
def testHasExplicitType(): Unit = doTest(
"""
|object Test {
| import scala.reflect.macros._
| def helloWorld: Unit = macro helloImpl
|
| def helloImpl(c: blackbox.Context): c.Expr[Unit] = { ??? }
|}
|""".stripMargin)()
}
|
JetBrains/intellij-scala
|
scala/scala-impl/test/org/jetbrains/plugins/scala/annotator/MacroDefAnnotatorTest.scala
|
Scala
|
apache-2.0
| 1,218 |
package dawn.flow
sealed trait Block[A] extends Source[A] { parent =>
override def scheduler = out.scheduler
def out: Source[A]
val trans = new Op1[A, A] {
def rawSource1 = out
def listen1(x: Timestamped[A]) =
parent.broadcast(x)
def name = "Block out"
}
}
trait Block0[A] extends Source0 with Block[A]
trait Block1[A, B] extends Source1[A] with Block[B] {
def listen1(x: Timestamped[A]) = ()
}
trait Block2[A, B, C] extends Source2[A, B] with Block[C] {
def listen1(x: Timestamped[A]) = ()
def listen2(x: Timestamped[B]) = ()
}
trait Block3[A, B, C, D] extends Source3[A, B, C] with Block[D] {
def listen1(x: Timestamped[A]) = ()
def listen2(x: Timestamped[B]) = ()
def listen3(x: Timestamped[C]) = ()
}
trait Block4[A, B, C, D, E] extends Source4[A, B, C, D] with Block[E] {
def listen1(x: Timestamped[A]) = ()
def listen2(x: Timestamped[B]) = ()
def listen3(x: Timestamped[C]) = ()
def listen4(x: Timestamped[D]) = ()
}
trait Block5[A, B, C, D, E, F] extends Source5[A, B, C, D, E] with Block[F] {
def listen1(x: Timestamped[A]) = ()
def listen2(x: Timestamped[B]) = ()
def listen3(x: Timestamped[C]) = ()
def listen4(x: Timestamped[D]) = ()
def listen5(x: Timestamped[E]) = ()
}
trait Block6[A, B, C, D, E, F, G] extends Source6[A, B, C, D, E, F] with Block[G] {
def listen1(x: Timestamped[A]) = ()
def listen2(x: Timestamped[B]) = ()
def listen3(x: Timestamped[C]) = ()
def listen4(x: Timestamped[D]) = ()
def listen5(x: Timestamped[E]) = ()
def listen6(x: Timestamped[F]) = ()
}
|
rubenfiszel/scala-flow
|
core/src/main/scala/Block.scala
|
Scala
|
mit
| 1,572 |
package com.twitter.server.util
import com.twitter.conversions.time._
import com.twitter.finagle.stats.{StatsRegistry, StatEntry}
import com.twitter.util.Time
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
private[server] object MetricSourceTest {
class Ctx {
case class Entry(delta: Double, value: Double) extends StatEntry
private[twitter] var underlying = Map[String, StatEntry]()
val sr = new StatsRegistry { def apply() = underlying }
val registry = { () => Seq(sr) }
val source = new MetricSource(registry, 1.second)
}
}
@RunWith(classOf[JUnitRunner])
class MetricSourceTest extends FunSuite {
import MetricSourceTest._
test("get") {
Time.withCurrentTimeFrozen { tc =>
val ctx = new Ctx
import ctx._
underlying = Map("clnt/foo/requests" -> Entry(0.0, 10.0))
assert(source.get("clnt/foo/requests") == None)
tc.advance(1.second)
assert(source.get("clnt/foo/requests").get.delta == 0.0)
assert(source.get("clnt/foo/requests").get.value == 10.0)
}
}
test("contains") {
Time.withCurrentTimeFrozen { tc =>
val ctx = new Ctx
import ctx._
underlying = Map("clnt/foo/requests" -> Entry(0.0, 0.0))
assert(source.contains("clnt/foo/requests") == false)
tc.advance(1.second)
assert(source.contains("clnt/foo/requests") == true)
}
}
test("keySet") {
Time.withCurrentTimeFrozen { tc =>
val ctx = new Ctx
import ctx._
underlying = Map(
"clnt/foo/requests" -> Entry(0.0, 0.0),
"clnt/foo/success" -> Entry(0.0, 0.0))
assert(source.keySet == Set.empty[String])
tc.advance(1.second)
assert(source.keySet == Set("clnt/foo/requests", "clnt/foo/success"))
}
}
}
|
BuoyantIO/twitter-server
|
src/test/scala/com/twitter/server/util/MetricSourceTest.scala
|
Scala
|
apache-2.0
| 1,803 |
/**
 * FILE: PageFactory.scala
 * PATH: /Codice/sgad/servertier/src/main/scala/sgad/servertier/presentation/pagemanager
 * CREATED: 27 February 2014
 * AUTHOR: ProTech
 * EMAIL: [email protected]
 *
 * This file is property of the ProTech group and is released under the Apache v2 license.
 *
 * CHANGE LOG:
 * 2014-02-27 - Class created - Biancucci Maurizio
 * 2014-02-27 - Wrote the getHomePageWithErrors method - Segantin Fabio
 */
package sgad.servertier.presentation.pagemanager
import scala.collection._
import scala.collection.mutable.ArrayBuffer
import java.io._
import scala.io._
import scala.sys.process._
/**
 * Class that manages the HTML pages sent to clients.
 */
object PageFactory {
	/**
	 * Address at which the HTTP server is reachable.
	 */
	private var addressRequest = ""
	/**
	 * Holds the HTML code of the home page.
	 */
	private var homePage = "Home"
	/**
	 * First part of the game page code.
	 */
	private var canvas1 = "Canvas"
	/**
	 * Second part of the game page; it marks the point where client-side JavaScript customizations are injected.
	 */
	private var canvas2ToReplace = ""
	/**
	 * Third and last part of the game page.
	 */
	private var canvas3 = ""
	/**
	 * Decides whether the client-side game code is minimized at load time.
	 */
	private var isCodeToMinimize = false
	/**
	 * Getter for the addressRequest attribute.
	 * @return The public address at which the HTTP server is reachable.
	 */
	def getAddressRequest = addressRequest
	/**
	 * Performs initialization by loading external resources such as the site pages and the client-side game code, optionally minimizing it.
	 * @param addressRequest The address at which the application answers HTTP requests.
	 * @param isCodeToMinimize Decides whether the client-side game code is minimized at load time.
	 */
def inizialize(addressRequest: String, isCodeToMinimize: Boolean): Boolean = {
this.addressRequest = addressRequest
this.isCodeToMinimize = isCodeToMinimize
loadWebPages() & loadCanvas()
}
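	// A minimal usage sketch (the address below is hypothetical): initialization is done
	// once at startup, after which the cached pages can be served.
	//
	//   PageFactory.inizialize("http://localhost:8080", isCodeToMinimize = false)
	//   val html = PageFactory.getHomePage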
	/**
	 * Loads the game code into memory and optionally minimizes it depending on the isCodeToMinimize attribute.
	 * @return True if loading the game code succeeds.
	 */
	private def loadCanvas(): Boolean = {
		try {
			//Load the list of client JavaScript sources to insert into the first part of the canvas.
			val list1 = Source.fromFile("src/main/resources/canvas/ListaJS1.conf")
			//Load the list of client JavaScript sources to insert into the second part of the canvas.
			val list2 = Source.fromFile("src/main/resources/canvas/ListaJS2.conf")
			//Load the first part of the HTML page containing the HTML5 canvas
			val canvasPage1 = scala.io.Source.fromFile("src/main/resources/canvas/canvas1.html").mkString
			//Load the second part of the HTML page containing the HTML5 canvas
			val canvasPage2 = scala.io.Source.fromFile("src/main/resources/canvas/canvas2.html").mkString
if (isCodeToMinimize)
println("Minimizzando il codice client.... l'operazione impiegherà qualche minuto.")
			//Accumulate into minimized1 the code of canvasPage1 plus all the JavaScript listed in list1, and store the result in canvas1
val minimized1 = new StringBuilder
list1.getLines().toArray.foreach((line: String) => {
				if (isCodeToMinimize) //Either invoke minimization through the external library or simply read the file
minimized1.append(Process("java -jar src/main/java/yuicompressor-2.4.8.jar ../" + line).!!)
else
minimized1.append(scala.io.Source.fromFile("../" + line).mkString)
})
canvas1 = canvasPage1 + minimized1.toString
			//Accumulate into minimized2 the code listed in list2 and store it in canvas2
val minimized2 = new StringBuilder
list2.getLines().toArray.foreach((line: String) => {
				if (isCodeToMinimize) //Either invoke minimization through the external library or simply read the file
minimized2.append(Process("java -jar src/main/java/yuicompressor-2.4.8.jar ../" + line).!!)
else
minimized2.append(scala.io.Source.fromFile("../" + line).mkString)
})
canvas2ToReplace = minimized2.toString()
			//Load into canvas3 the code of the second part of the HTML page
canvas3 = canvasPage2
}
catch {
			//If errors occur while reading the files, return false
case e: IOException => println("Errore di un'operazione di input durante il caricamento del codice di gioco"); println(e); return false
}
true
}
	/**
	 * Getter for the canvas1 attribute.
	 * @return The first part of the game page code.
	 */
	def getCanvas1 = canvas1
	/**
	 * Getter for the canvas2ToReplace attribute.
	 * @return The second part of the game page; it marks the point where client-side JavaScript customizations are injected.
	 */
	def getCanvas2ToReplace = canvas2ToReplace
	/**
	 * Getter for the canvas3 attribute.
	 * @return Third and last part of the game page.
	 */
	def getCanvas3 = canvas3
	/**
	 * Loads into memory all the code of the game site's web pages.
	 */
private def loadWebPages(): Boolean = {
try {
homePage = scala.io.Source.fromFile("src/main/resources/webpages/index.html").mkString
}
catch {
			//In case of errors, return false
case e: IOException => println("Errore durante il caricamento delle web pages"); println(e); return false
}
true
}
	/**
	 * Getter that returns the site's home page.
	 * @return The home page of the site.
	 */
	def getHomePage: String = homePage
	/**
	 * Method used by the pageManager to return the home page with the errors shown.
	 * @param data The data submitted from the page.
	 * @param errors A list of errors that were found.
	 * @return The page including the errors.
	 */
def getHomePageWithErrors(data: Map[String, String], errors: ArrayBuffer[String]): String = {
var finalPage: String = homePage
var registration = true
		//The values submitted from the form are saved in a map that returns the empty string for missing keys
val reinsertData = (mutable.Map() ++ data).withDefaultValue("")
errors.foreach({
case "RExistingUser" =>
				//Show the message for an already-used username by finding the div with id user_usato
finalPage = """(?s)(<div(.)*?id=\\"user_usato\\"(.)*?)(>)""".r.replaceAllIn(finalPage, "$1 style=\\"display:block\\" $4")
case "RInvalidUser" =>
				//Show the message for an invalid username by finding the div with id user_vuoto
finalPage = """(?s)(<div(.)*?id=\\"user_vuoto\\"(.)*?)(>)""".r.replaceAllIn(finalPage, "$1 style=\\"display:block\\" $4")
case "RInvalidEmail" =>
				//Show the message for an invalid email by finding the div with id errore_email
finalPage = """(?s)(<div(.)*?id=\\"errore_email\\"(.)*?)(>)""".r.replaceAllIn(finalPage, "$1 style=\\"display:block\\" $4")
case "RExistingEmail" =>
				//Show the message for an already-used email by finding the div with id email_usata
finalPage = """(?s)(<div(.)*?id=\\"email_usata\\"(.)*?)(>)""".r.replaceAllIn(finalPage, "$1 style=\\"display:block\\" $4")
case "RInvalidPassword" =>
				//Show the message for an invalid password by finding the div with id errore_password
finalPage = """(?s)(<div(.)*?id=\\"errore_password\\"(.)*?)(>)""".r.replaceAllIn(finalPage, "$1 style=\\"display:block\\" $4")
case "RNonMatchingPassword" =>
				//Show the message for non-matching passwords by finding the div with id errore_password2
finalPage = """(?s)(<div(.)*?id=\\"errore_password2\\"(.)*?)(>)""".r.replaceAllIn(finalPage, "$1 style=\\"display:block\\" $4")
case "IncorrectLogin" =>
				//Show the message for a failed login by finding the div with id login_errato
registration = false
finalPage = """(?s)(<div(.)*?id=\\"login_errato\\"(.)*?)(>)""".r.replaceAllIn(finalPage, "$1 style=\\"display:block\\" $4")
})
if (registration) {
			//If it was a registration error, re-populate the input fields with the submitted values
finalPage = """(?s)(<input(.)*?id=\\"register_username\\"(.)*?)(>)""".r.replaceAllIn(finalPage, "$1 value=\\"" + reinsertData("user") + "\\" $4")
finalPage = """(?s)(<input(.)*?id=\\"register_email\\"(.)*?)(>)""".r.replaceAllIn(finalPage, "$1 value=\\"" + reinsertData("email") + "\\" $4")
} else {
			//Otherwise re-populate the login fields
finalPage = """(?s)(<input(.)*?id=\\"log_username\\"(.)*?)(>)""".r.replaceAllIn(finalPage, "$1 value=\\"" + reinsertData("user") + "\\" $4")
finalPage = """(?s)(<input(.)*?id=\\"log_password\\"(.)*?)(>)""".r.replaceAllIn(finalPage, "$1 value=\\"" + reinsertData("password") + "\\" $4")
}
finalPage
}
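	// A minimal sketch of a typical call (values are hypothetical): the data map carries
	// the submitted form fields and the buffer the validation error codes matched above.
	//
	//   getHomePageWithErrors(
	//     Map("user" -> "mario", "email" -> "mario@example.com"),
	//     ArrayBuffer("RExistingUser", "RInvalidPassword"))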
	/**
	 * Method returning the page shown after a successful registration.
	 * @return The successful-registration page.
	 */
def getHomePageRegistrationSuccessful = {
"""(?s)(<div id=\\"inner_maincontent\\">)((.)*?</form>[^<]*?)(</div>)""".r.replaceAllIn(homePage, "$1<h1>La registrazione è andata a buon fine</h1>$4")
}
	/**
	 * Method returning the page served when the service is unavailable.
	 * @return The page displayed when the database cannot be reached.
	 */
def getHomePageServiceIsDown = {
"""(?s)(<div id=\\"inner_maincontent\\">)((.)*?</form>[^<]*?)(</div>)""".r.replaceAllIn(homePage, "$1<h1>Il servizio è momentaneamente non disponibile, ci scusiamo per il disagio.</h1>$4")
}
}
|
protechunipd/SGAD
|
Codice/sgad/servertier/src/main/scala/sgad/servertier/presentation/pagemanager/PageFactory.scala
|
Scala
|
apache-2.0
| 9,630 |
/**
* Copyright 2017 RiskSense, Inc.
* This file is part of ipaddr library.
*
* Ipaddr is free software licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may obtain a copy of the
* License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.risksense.ipaddr
// scalastyle:off multiple.string.literals magic.number
class IpNetworkTest extends UnitSpec {
private val net1 = IpNetwork("192.168.1.2", 24)
private val net2 = IpNetwork("192.168.1.2/255.255.255.0")
private val net3 = IpNetwork("10.2.10.230/255.255.255.0")
private val net4 = IpNetwork(3232235778L, 32)
private val net5 = IpNetwork(net1.toString)
private val net6 = IpNetwork("0.0.0.0/0")
private val netAddr = "192.168.1.0"
private val netAddr2 = "192.168.1.255"
private val netAddr3 = "192.168.2.0"
private val netAddr4 = "192.168.0.255"
private val range = IpRange(netAddr, netAddr2)
private val range2 = IpRange(netAddr, netAddr3)
private val range3 = IpRange(netAddr4, netAddr2)
"Creating an IpNetwork " should "result in IpaddrException if address is invalid" in {
an[IpaddrException] should be thrownBy IpNetwork("192.168.256/24")
an[IpaddrException] should be thrownBy IpNetwork("192.168.256/256.255.255.0")
}
it should "create IpNetwork object if address is valid" in {
IpNetwork("10.2.1.0/24") shouldBe a[IpNetwork]
IpNetwork("10.2.1.0/255.255.255.0") shouldBe a[IpNetwork]
}
"Network" should "perform all network based operations correctly" in {
val net1Broadcast = IpAddress("192.168.1.255")
val net1Hostmask = IpAddress("0.0.0.255")
val net1Ip = IpAddress("192.168.1.2")
val net1IpAddr = IpAddress("192.168.1.0")
val net1Netmask = IpAddress("255.255.255.0")
net1.key should be((4, 3232235776L, 3232236031L))
net1.sortKey should be((4, 3232235776L, 23, 2))
net1.broadcast should be(net1Broadcast)
net1.cidr should be(net2)
net1.hostmask should be(net1Hostmask)
net1.ip should be(net1Ip)
net1.netmask should be(net1Netmask)
net1.size should be(256)
net1.first should be(3232235776L)
net1.last should be(3232236031L)
net1.network should be(net1IpAddr)
net6.size should be(4294967296L)
}
it should "perform supernet operation" in {
net1.supernet(22).size should be(2)
net1.supernet(33) should be(Nil)
}
it should "perform subnet operation" in {
net1.subnet(26).size should be(4) // scalastyle:ignore
net1.subnet(26).size should be(4)
net1.subnet(33) should be(Nil)
}
it should "perform next operation" in {
val net1Next = IpNetwork("192.168.2.0/24")
net1.next() == net1Next should be(true)
val maxNet = IpNetwork("255.255.255.255/30")
an[IpaddrException] should be thrownBy maxNet.next()
}
it should "perform previous operation" in {
val net1Previous = IpNetwork("192.168.0.0/24")
net1.previous() == net1Previous should be(true)
val minNet = IpNetwork("0.0.0.0/24")
an[IpaddrException] should be thrownBy minNet.previous()
}
it should "perform all comparison operations" in {
(net1 == net2) should be(true)
(net1 == net3) should be(false)
(net1 == "1.2.3.4") should be(false)
}
it should "perform allHosts operation" in {
IpNetwork("192.168.1.1/30").allHosts.force should be(
Seq(IpAddress("192.168.1.0"), IpAddress("192.168.1.1"),
IpAddress("192.168.1.2"), IpAddress("192.168.1.3"))
)
}
"Network object" should "not contain bad input" in {
an[IpaddrException] should be thrownBy net1.contains("abc")
an[IpaddrException] should be thrownBy net1.contains("2.0.0.0.")
}
it should "contain IpRange" in {
net1.contains(range) should be(true)
net1.contains(range2) should be(false)
net1.contains(range3) should be(false)
}
it should "contain Network" in {
net1.contains(net4) should be(true)
net1.contains(net5) should be(true)
net4.contains(net1) should be(false)
}
it should "contain IpAddress" in {
net1.contains(netAddr) should be(true)
net1.contains(netAddr2) should be(true)
net1.contains(netAddr3) should be(false)
net1.contains(netAddr4) should be(false)
}
}
|
risksense/ipaddr
|
src/test/scala/com/risksense/ipaddr/IpNetworkTest.scala
|
Scala
|
apache-2.0
| 4,598 |
package filodb.memory.format
import org.agrona.{DirectBuffer, ExpandableArrayBuffer}
import org.agrona.concurrent.UnsafeBuffer
import org.scalatest._
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
class NibblePackTest extends AnyFunSpec with Matchers with ScalaCheckPropertyChecks {
it("should NibblePack 8 words partial non-zero even nibbles") {
    // Only inputs 1-4 are nonzero; each nonzero value has an even number of nibbles
val buf = new ExpandableArrayBuffer()
val inputs = Array(0L,
0x0000003322110000L, 0x0000004433220000L,
0x0000005544330000L, 0x0000006655440000L,
0L, 0L, 0L)
val outpos = NibblePack.pack8(inputs, buf, 0)
// Expected result:
val expectedBuf = Array[Byte](
0x1e, // 0b0001_1110u8, // only some bits on
0x54, // six nibbles wide, four zero nibbles trailing
0x11, 0x22, 0x33, 0x22, 0x33, 0x44,
0x33, 0x44, 0x55, 0x44, 0x55, 0x66)
outpos shouldEqual expectedBuf.length
buf.byteArray.take(expectedBuf.length) shouldEqual expectedBuf
}
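  // An informal reading of the expected bytes above, based on this test's own comments:
  // the first byte 0x1e = 0b0001_1110 is a bitmap marking inputs 1-4 as the only nonzero
  // words; the header byte 0x54 says each is 6 nibbles wide (high nibble = width - 1 = 5)
  // with 4 trailing zero nibbles (low nibble), so 0x0000003322110000L contributes only the
  // nibbles 1,1,2,2,3,3 to the packed stream.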
it("should NibblePack 8 words partial non-zero odd nibbles") {
    // Only inputs 1-5 are nonzero; each nonzero value has an odd number of nibbles
val buf = new ExpandableArrayBuffer()
val inputs = Array(0L,
0x0000003322100000L, 0x0000004433200000L,
0x0000005544300000L, 0x0000006655400000L,
0x0000007654300000L, 0L, 0L)
val outpos = NibblePack.pack8(inputs, buf, 0)
// Expected result:
val expectedBuf = Array[Byte](
0x3e, // 0b0011_1110u8, // only some bits on
0x45, // five nibbles wide, five zero nibbles trailing
0x21, 0x32, 0x23, 0x33, 0x44, // First two values
0x43, 0x54, 0x45, 0x55, 0x66,
0x43, 0x65, 0x07
)
outpos shouldEqual expectedBuf.length
buf.byteArray.take(expectedBuf.length) shouldEqual expectedBuf
}
it("should correctly unpack partial 8 words odd nibbles") {
val compressed = Array[Byte](
0x3e, // 0b0011_1110u8, // only some bits on
0x45, // five nibbles wide, five zero nibbles trailing
0x21, 0x32, 0x23, 0x33, 0x44, // First two values
0x43, 0x54, 0x45, 0x55, 0x66,
0x43, 0x65, 0x07
)
val expected = Array(0L,
0x0000003322100000L, 0x0000004433200000L,
0x0000005544300000L, 0x0000006655400000L,
0x0000007654300000L, 0L, 0L)
val inbuf = new UnsafeBuffer(compressed)
var outarray: Array[Long] = null
val res = NibblePack.unpack8(inbuf, new NibblePack.Sink {
def process(data: Array[Long]): Unit = { outarray = data }
})
res shouldEqual NibblePack.Ok
inbuf.capacity shouldEqual 0
outarray shouldEqual expected
}
it("should pack and unpack delta values") {
val inputs = Array(0L, 1000, 1001, 1002, 1003, 2005, 2010, 3034, 4045, 5056, 6067, 7078)
val buf = new ExpandableArrayBuffer()
val bytesWritten = NibblePack.packDelta(inputs, buf, 0)
val sink = NibblePack.DeltaSink(new Array[Long](inputs.size))
val bufSlice = new UnsafeBuffer(buf, 0, bytesWritten)
val res = NibblePack.unpackToSink(bufSlice, sink, inputs.size)
res shouldEqual NibblePack.Ok
sink.outArray shouldEqual inputs
val inputs2 = Array(10000, 1032583228027L)
val written2 = NibblePack.packDelta(inputs2, buf, 0)
val sink2 = NibblePack.DeltaSink(new Array[Long](inputs2.size))
bufSlice.wrap(buf, 0, written2)
val res2 = NibblePack.unpackToSink(bufSlice, sink2, inputs2.size)
res2 shouldEqual NibblePack.Ok
sink2.outArray shouldEqual inputs2
}
it("should pack and unpack double values") {
val inputs = Array(0.0, 2.5, 5.0, 7.5, 8, 13.2, 18.9, 89, 101.1, 102.3)
val buf = new ExpandableArrayBuffer()
val bytesWritten = NibblePack.packDoubles(inputs, buf, 0)
val bufSlice = new UnsafeBuffer(buf, 0, bytesWritten)
val out = new Array[Double](inputs.size)
val res = NibblePack.unpackDoubleXOR(bufSlice, out)
res shouldEqual NibblePack.Ok
out shouldEqual inputs
}
def unpackAndCompare(inbuf: DirectBuffer, orig: Array[Long]): Unit = {
val sink2 = NibblePack.DeltaSink(new Array[Long](orig.size))
val res = NibblePack.unpackToSink(inbuf, sink2, orig.size)
res shouldEqual NibblePack.Ok
sink2.outArray shouldEqual orig
}
def unpackAndCompare(buf: DirectBuffer, index: Int, numBytes: Int, orig: Array[Long]): Unit = {
val slice = new UnsafeBuffer(buf, index, numBytes)
unpackAndCompare(slice, orig)
}
it("should repack increasing deltas to diffs using DeltaDiffPackSink") {
val inputs = Seq(Array(0L, 1000, 1001, 1002, 1003, 2005, 2010, 3034, 4045, 5056, 6067, 7078),
Array(3L, 1004, 1006, 1008, 1009, 2010, 2020, 3056, 4070, 5090, 6101, 7150),
Array(7L, 1010, 1016, 1018, 1019, 2020, 2030, 3078, 4101, 5112, 6134, 7195))
val diffs = inputs.sliding(2).map { case twoInputs =>
twoInputs.last.clone.zipWithIndex.map { case (num, i) => num - twoInputs.head(i) }
}.toSeq
val writeBuf = new ExpandableArrayBuffer()
// Compress each individual input into its own buffer
val bufsAndSize = inputs.map { in =>
val buf = new ExpandableArrayBuffer()
val bytesWritten = NibblePack.packDelta(in, buf, 0)
(buf, bytesWritten)
}
// Now, use DeltaDiffPackSink to recompress to deltas from initial inputs
val sink = NibblePack.DeltaDiffPackSink(new Array[Long](inputs.head.size), writeBuf)
// Verify delta on first one (empty diffs) yields back the original
val (firstCompBuf, firstBufSize) = bufsAndSize.head
val bufSlice0 = new UnsafeBuffer(firstCompBuf, 0, firstBufSize)
val res0 = NibblePack.unpackToSink(bufSlice0, sink, inputs.head.size)
res0 shouldEqual NibblePack.Ok
val finalWritten0 = sink.writePos
finalWritten0 shouldEqual firstBufSize
unpackAndCompare(writeBuf, 0, finalWritten0, inputs.head)
// Verify delta on subsequent ones yields diff
var initPos = finalWritten0
sink.writePos shouldEqual initPos
bufsAndSize.drop(1).zip(diffs).foreach { case ((origCompressedBuf, origSize), diff) =>
sink.reset()
val bufSlice = new UnsafeBuffer(origCompressedBuf, 0, origSize)
val res = NibblePack.unpackToSink(bufSlice, sink, inputs.head.size)
res shouldEqual NibblePack.Ok
val finalWritten = sink.writePos
unpackAndCompare(writeBuf, initPos, finalWritten - initPos, diff)
initPos = finalWritten
sink.writePos shouldEqual finalWritten
}
}
it("should repack increasing deltas to section-relative diffs using DeltaSectDiffPackSink") {
val inputs = Seq(Array(0L, 1000, 1001, 1002, 1003, 2005, 2010, 3034, 4045, 5056, 6067, 7078),
Array(3L, 1004, 1006, 1008, 1009, 2010, 2020, 3056, 4070, 5090, 6101, 7150),
Array(7L, 1010, 1016, 1018, 1019, 2020, 2030, 3078, 4101, 5112, 6134, 7195))
val diffs = inputs.drop(1).map { in =>
in.clone.zipWithIndex.map { case (num, i) => num - inputs.head(i) }
}.toSeq
val writeBuf = new ExpandableArrayBuffer()
// Compress each individual input into its own buffer
val bufsAndSize = inputs.map { in =>
val buf = new ExpandableArrayBuffer()
val bytesWritten = NibblePack.packDelta(in, buf, 0)
(buf, bytesWritten)
}
// Now, use DeltaDiffPackSink to recompress to deltas from initial input
val sink = new NibblePack.DeltaSectDiffPackSink(inputs.head.size, writeBuf)
// Feed in initial delta/histogram
val bufSlice0 = new UnsafeBuffer(bufsAndSize.head._1, 0, bufsAndSize.head._2)
val res = NibblePack.unpackToSink(bufSlice0, sink, inputs.head.size)
res shouldEqual NibblePack.Ok
sink.setOriginal()
var initPos = sink.writePos
// Verify delta on subsequent ones yields diff
bufsAndSize.drop(1).zip(diffs).foreach { case ((origCompressedBuf, origSize), diff) =>
sink.reset()
val bufSlice = new UnsafeBuffer(origCompressedBuf, 0, origSize)
val res = NibblePack.unpackToSink(bufSlice, sink, inputs.head.size)
res shouldEqual NibblePack.Ok
val finalWritten = sink.writePos
unpackAndCompare(writeBuf, initPos, finalWritten - initPos, diff)
initPos = finalWritten
sink.writePos shouldEqual finalWritten
}
}
import org.scalacheck._
// Generate a list of increasing integers, every time bound it slightly differently
// (to test different int compression techniques)
def increasingLongList: Gen[Seq[Long]] =
for {
maxVal <- Gen.oneOf(1000, 5000, 30000, Math.pow(2L, 40).toLong)
seqList <- Gen.containerOf[Seq, Long](Gen.choose(10, maxVal))
} yield { seqList.scanLeft(10000L)(_ + Math.abs(_)) }
it("should pack and unpack random list of increasing Longs via delta") {
val buf = new ExpandableArrayBuffer()
forAll(increasingLongList) { longs =>
val inputs = longs.toArray
val bytesWritten = NibblePack.packDelta(inputs, buf, 0)
val sink = NibblePack.DeltaSink(new Array[Long](inputs.size))
val bufSlice = new UnsafeBuffer(buf, 0, bytesWritten)
val res = NibblePack.unpackToSink(bufSlice, sink, inputs.size)
res shouldEqual NibblePack.Ok
sink.outArray shouldEqual inputs
}
}
def increasingDoubleList: Gen[Seq[Double]] = increasingLongList.map(_.map(_.toDouble)).filter(_.length > 0)
it("should pack and unpack random list of increasing Doubles via XOR") {
val buf = new ExpandableArrayBuffer()
forAll(increasingDoubleList) { doubles =>
val inputs = doubles.toArray
val bytesWritten = NibblePack.packDoubles(inputs, buf, 0)
val bufSlice = new UnsafeBuffer(buf, 0, bytesWritten)
val out = new Array[Double](inputs.size)
val res = NibblePack.unpackDoubleXOR(bufSlice, out)
res shouldEqual NibblePack.Ok
out shouldEqual inputs
}
}
}
|
tuplejump/FiloDB
|
memory/src/test/scala/filodb.memory/format/NibblePackTest.scala
|
Scala
|
apache-2.0
| 10,069 |
package benchmarks.coloredGraph.internal
import java.io.File
import java.nio.file.Files
import scala.Array.canBuildFrom
import scala.collection.mutable.HashMap
import scala.util.Random
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import scala.collection.mutable.HashSet
import benchmarks.coloredGraph.Color
import java.io.FileOutputStream
import java.io.BufferedOutputStream
import scala.sys.process._
import benchmarks.coloredGraph.api.SkillFile
import de.ust.skill.common.scala.api.Write
import de.ust.skill.common.scala.api.Read
import de.ust.skill.common.scala.api.Create
import benchmarks.coloredGraph.api.internal.NodePool
/**
* Write - Read - Append - Benchmark, based on the WSR'14 paper, but without sets.
*
* run with: -Xmx5G
* @author Timm Felden
*/
@RunWith(classOf[JUnitRunner])
class WRABenchmark extends FunSuite {
import benchmarks.BenchmarkTools.printGraph
final def tmpFile(s : String) = {
val r = File.createTempFile(s, ".sf")
// r.deleteOnExit
r.toPath
}
var timer = System.nanoTime;
def init {
timer = System.nanoTime;
}
case class Result(val name : String) {
val timings = HashMap[Int, Double]()
val speeds = HashMap[Int, Double]()
/**
     * @param x number of objects processed in this phase; timings and speeds (MObjects/sec) are keyed by it
*/
def end(x : Int) {
val t = System.nanoTime
if (!timings.contains(x)) {
// TODO factor this out to "reset" and add a "dryRunCount" to the test
timings.put(x, 0.0)
speeds.put(x, 0.0)
}
timings(x) += (t - timer) * 1e-9
speeds(x) += (x.toDouble * 1e-6) / ((t - timer) * 1e-9)
timer = System.nanoTime
}
def average(repetitions : Int) {
for (x ← timings.keys)
timings(x) /= repetitions.toDouble
}
}
// we use a deterministic random number generator, in order to get reproducible results over several runs
val random = new Random
val randomSeed = 31337
// set upper bound to 7 for nice results; max = 8??(fdp takes too long); reduced to 4 for test-suite
val counts = (5 to 8).map(20 << _).toArray
// results
val create = Result("create")
val write = Result("write")
val read = Result("read")
val createDot = Result("create dot")
val results = Seq(create, write, read, createDot)
// set to 10 for nice results; max = 100 (0⇀7); reduced to 1 for tests
val repetitions = 2;
def eval(test : Int ⇒ Unit) {
for (count ← 0 until repetitions) {
for (n ← counts) {
Random.setSeed(31337)
System.gc
// yield to the gc
Thread.sleep((Math.sqrt(n) * 0.3).toLong);
test(n)
print(".")
}
System.gc
System.runFinalization
Thread.sleep(4)
println(s"[${count + 1}/$repetitions]")
}
for (r ← results)
r.average(repetitions)
}
test("make wsr14 results") {
eval(task)
printGraph("total time taken", "loglogaxis", counts, results.map { r ⇒ (r.name, r.timings) })
printGraph("$\\\\frac{MObjects}{sec}$", "semilogxaxis", counts, results.map { r ⇒ (r.name, r.speeds) })
}
def task(n : Int) {
// for publication use "new File(s"out-$n.sf").toPath" instead
val f = tmpFile("wsr.append");
locally {
init;
val σ = SkillFile.open(f, Create, Write);
      // n nodes, random color components 0..15 (for later distribution; keep below 255 for printing)
for (i ← 0 until n)
σ.Node.make(σ.Color.make(Random.nextInt(16).toByte, Random.nextInt(16).toByte, Random.nextInt(16).toByte), HashSet())
      // 16 random edges per node; distribute color over each edge
val nodes = σ.Node.filter(_.getSkillID == -1)
@inline def merge(a : Color, b : Color) {
a.red = (a.red + b.red).toByte
a.green = (a.green + b.green).toByte
a.blue = (a.blue + b.blue).toByte
}
for (node ← σ.Node; i ← 0 until 16) {
val other = nodes(Random.nextInt(nodes.size))
node.edges += other
merge(node.color, other.color)
}
create.end(n);
σ.close
write.end(n);
}
locally {
val σ = SkillFile.open(f, Read, Write);
read.end(n);
// // create a dot file
// // for publication use "new File(s"out-$n.dot")" instead
// val dotFile = File.createTempFile(s"out-$n", ".dot")
// val dot = new BufferedOutputStream(new FileOutputStream(dotFile))
// @inline def put(s : String) {
// dot.write(s.getBytes())
// }
// put(s"digraph sfBenchmark$n{")
// for (n ← σ.Node) {
// put(f"""
// ${n.getSkillID}[label="",style=filled,color="#${n.color.red}%2X${n.color.green}%2X${n.color.blue}%2X"];
// ${n.getSkillID} -> ${n.edges.map(_.getSkillID).mkString("{", ";", "}")}[dir=none,color="#${n.color.red}%2X${n.color.green}%2X${n.color.blue}%2X"];""")
// }
// put("\\n}")
// dot.close
createDot.end(n)
}
}
}
|
skill-lang/skillScalaTestSuite
|
src/test/scala/benchmarks/coloredGraph/internal/WRABenchmark.scala
|
Scala
|
bsd-3-clause
| 5,023 |
object Test {
def main(args: Array[String]): Unit = {
def f(erased i: Int) = {
new Foo(i)(foo)
}
f(5)
}
def foo: Int = {
println("foo")
42
}
}
class Foo(erased a: Int)(b: Int) {
println("Foo")
}
|
som-snytt/dotty
|
tests/run-custom-args/erased/erased-7.scala
|
Scala
|
apache-2.0
| 235 |
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.variable
import cc.factorie.util.DoubleSeq
import org.junit.Assert._
import org.junit.Test
import org.scalatest.junit._
class TestProportionsVariable extends JUnitSuite with cc.factorie.util.FastLogging {
@Test
def testDenseProportions1(): Unit = {
val m1 = new DenseProportions1(4, 1)
assertEquals(0.25, m1.pr(0), 0.001)
val m2 = new DenseProportions1(DoubleSeq(1.0, 1,1,1))
assertEquals(0.25, m2.pr(0), 0.001)
val m3 = new DenseProportions1(Array(1.0, 1,1,1))
assertEquals(0.25, m3.pr(0), 0.001)
}
}
class TestGrowableDenseProportions1 extends JUnitSuite with cc.factorie.util.FastLogging {
@Test
def testGrowableDenseProportions1(): Unit = {
object GrowableDomain extends CategoricalDomain[String]
val p = new GrowableDenseProportions1(GrowableDomain)
assert(p.size == 0)
GrowableDomain.value("hello")
assert(p.size == 1)
assertEquals(1.0, p(0), 0.001)
GrowableDomain.value("world")
assert(p.size == 2)
assertEquals(0.5, p(0), 0.001)
}
}
class TestGrowableUniformProportions1 extends JUnitSuite {
@Test
def testGrowableUniformProportions1(): Unit = {
object GrowableDomain extends CategoricalDomain[String]
val p = new GrowableUniformProportions1(GrowableDomain)
assert(p.size == 0)
GrowableDomain.value("hello")
assert(p.size == 1)
assertEquals(1.0, p(0), 0.001)
GrowableDomain.value("world")
assert(p.size == 2)
assertEquals(0.5, p(0), 0.001)
}
}
|
Craigacp/factorie
|
src/test/scala/cc/factorie/variable/TestProportionsVariable.scala
|
Scala
|
apache-2.0
| 2,248 |
package com.karasiq.bittorrent.dht
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{Failure, Random, Success}
import akka.actor.{Actor, ActorLogging, ActorRef, DeadLetterSuppression, NotInfluenceReceiveTimeout, PossiblyHarmful, Props}
import akka.pattern.{ask, pipe}
import akka.util.Timeout
import com.karasiq.bittorrent.dht.DHTMessageDispatcher.SendQuery
import com.karasiq.bittorrent.dht.DHTMessages.{DHTNodeAddress, DHTQueries, FindNodeResponse}
object DHTBucket {
// Messages
sealed trait Message
final case class AssociateNodes(nodes: Set[DHTNodeAddress]) extends Message
final case object BucketIsEmpty extends Message with NotInfluenceReceiveTimeout with DeadLetterSuppression
final case class FindNodes(id: NodeId) extends Message
object FindNodes {
sealed trait Status
final case class Success(nodes: Seq[DHTNodeAddress]) extends Status
}
final case object GetAllNodes {
sealed trait Status
final case class Success(nodes: Set[DHTNodeAddress]) extends Status
}
// Internal messages
private sealed trait InternalMessage extends Message with PossiblyHarmful
private case object RefreshNodes extends InternalMessage with NotInfluenceReceiveTimeout with DeadLetterSuppression
private final case class RemoveNode(node: DHTNodeAddress) extends InternalMessage
private final case class DeSplit(nodes: Set[DHTNodeAddress]) extends InternalMessage
// Events
sealed trait Event
// Props
def props(dhtCtx: DHTContext, start: BigInt, end: BigInt): Props = {
Props(new DHTBucket(dhtCtx, start, end))
}
}
class DHTBucket(dhtCtx: DHTContext, start: BigInt, end: BigInt) extends Actor with ActorLogging {
import context.dispatcher
import DHTBucket._
private[this] implicit val timeout = Timeout(10 seconds)
private[this] val maxNodesInBucket = 8
override def receive: Receive = receiveDefault(Set.empty)
def receiveDefault(nodes: Set[DHTNodeAddress], lastChanged: Long = System.nanoTime()): Receive = {
case AssociateNodes(addNodes) ⇒
val newNodes = nodes ++ addNodes.filter { n ⇒
val idInt = n.nodeId.toBigInt
idInt >= start && idInt <= end
}
if (canSplit(newNodes)) split(newNodes)
else context.become(receiveDefault(newNodes))
case RemoveNode(address) ⇒
val newNodes = nodes - address
if (newNodes.isEmpty) context.parent ! BucketIsEmpty
context.become(receiveDefault(newNodes))
case FindNodes(target) ⇒
val result = nodes.toVector.sortBy(_.nodeId.distanceTo(target))
sender() ! FindNodes.Success(result)
case GetAllNodes ⇒
sender() ! GetAllNodes.Success(nodes)
case RefreshNodes ⇒
val changedAgo = (System.nanoTime() - lastChanged).nanos
if (changedAgo > 15.minutes) refreshBucket(nodes)
}
def receiveSplit(half: BigInt, first: ActorRef, second: ActorRef): Receive = {
case AssociateNodes(nodes) ⇒
val (firstNodes, secondNodes) = nodes.partition(_.nodeId.toBigInt < half)
first.forward(AssociateNodes(firstNodes))
second.forward(AssociateNodes(secondNodes))
case rn @ RemoveNode(address) ⇒
if (address.nodeId.toBigInt < half) first.forward(rn)
else second.forward(rn)
case fn @ FindNodes(target) ⇒
if (target.toBigInt < half) first.forward(fn)
else second.forward(fn)
case GetAllNodes ⇒
val future = for {
GetAllNodes.Success(firstList) ← (first ? GetAllNodes).mapTo[GetAllNodes.Success]
GetAllNodes.Success(secondList) ← (second ? GetAllNodes).mapTo[GetAllNodes.Success]
} yield GetAllNodes.Success(firstList ++ secondList)
future.pipeTo(sender())
case BucketIsEmpty ⇒
(self ? GetAllNodes).mapTo[GetAllNodes.Success].foreach {
case GetAllNodes.Success(nodes) ⇒
if (nodes.size <= maxNodesInBucket) self ! DeSplit(nodes)
}
case RefreshNodes ⇒
// Ignore
case DeSplit(nodes) ⇒
context.stop(first)
context.stop(second)
context.become(receiveDefault(nodes))
}
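  // Splits this bucket's ID range at the midpoint into two child buckets, redistributes the
  // current nodes between them and switches the parent to pure routing (receiveSplit).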
def split(nodes: Set[DHTNodeAddress]): Unit = {
val half = (start + end) / 2
val (firstNodes, secondNodes) = nodes.partition(_.nodeId.toBigInt < half)
val firstBucket = context.actorOf(DHTBucket.props(dhtCtx, start, half))
val secondBucket = context.actorOf(DHTBucket.props(dhtCtx, half, end))
firstBucket ! AssociateNodes(firstNodes)
secondBucket ! AssociateNodes(secondNodes)
context.become(receiveSplit(half, firstBucket, secondBucket))
}
def canSplit(nodes: Set[DHTNodeAddress]): Boolean = {
(end - start) > maxNodesInBucket && nodes.size > maxNodesInBucket
}
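  // Picks a random ID inside this bucket's range and sends find_node queries to every known node:
  // unresponsive nodes are removed, and any returned nodes are handed to the routing table.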
def refreshBucket(nodes: Set[DHTNodeAddress]): Unit = {
val randomId = start + BigInt((end - start).bitLength, Random)
assert(randomId >= start && randomId <= end, "Invalid random id")
log.debug("Refreshing bucket: {}", randomId: NodeId)
nodes.toVector.foreach { nodeAddress ⇒
val future = (dhtCtx.messageDispatcher ? SendQuery(nodeAddress.address, DHTQueries.findNode(dhtCtx.selfNodeId, randomId))).mapTo[SendQuery.Status]
future.onComplete {
case Success(SendQuery.Success(FindNodeResponse.Encoded(FindNodeResponse(_, nodes)))) ⇒
log.info("Bucket refreshed: {}", nodes)
nodes
.map(na ⇒ DHTRoutingTable.AddNode(na.address))
.foreach(dhtCtx.routingTable ! _)
case Failure(_) | Success(SendQuery.Failure(_)) ⇒
self ! RemoveNode(nodeAddress)
case _ ⇒
// Ignore
}
}
}
override def preStart(): Unit = {
super.preStart()
context.system.scheduler.schedule(5 minutes, 5 minutes, self, RefreshNodes)
}
}
|
Karasiq/torrentstream
|
library/src/main/scala/com/karasiq/bittorrent/dht/DHTBucket.scala
|
Scala
|
apache-2.0
| 5,751 |
package chess
import Pos._
class HistoryTest extends ChessTest {
"threefold repetition" should {
def toHash(a: Int) = Array(a.toByte, 0.toByte, 0.toByte)
def makeHistory(positions: List[Int]) = (positions map toHash).foldLeft(History()) {
case (history, hash) => history.copy(positionHashes = history.positionHashesWith(hash))
}
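    // As the cases below illustrate, threefoldRepetition is expected to be true exactly when the
    // hash of the most recent position already occurs at least three times in the history.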
"empty history" in {
History().threefoldRepetition must_== false
}
"not 3 same elements" in {
val history = makeHistory(List(1, 2, 3, 4, 5, 2, 5, 6, 23, 55))
history.threefoldRepetition must_== false
}
"not 3 elements same to the last one" in {
val history = makeHistory(List(1, 2, 3, 4, 5, 2, 5, 6, 23, 2, 55))
history.threefoldRepetition must_== false
}
"positive" in {
val history = makeHistory(List(1, 2, 3, 4, 5, 2, 5, 6, 23, 2))
history.threefoldRepetition must_== true
}
}
}
|
psuter/scalachess
|
src/test/scala/HistoryTest.scala
|
Scala
|
mit
| 910 |
/*
* Licensed to Intel Corporation under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Intel Corporation licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl._
import org.scalatest.{FlatSpec, Matchers}
@com.intel.analytics.bigdl.tags.Parallel
class SpatialCrossMapLRNSpec extends FlatSpec with Matchers {
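  // Naive reference implementation of cross-channel LRN: for each element x at channel c,
  //   scale = 1 + (alpha / size) * sum of x_i^2 over channels in [c - (size-1)/2, c - (size-1)/2 + size - 1] (clamped),
  //   output = x * scale^(-beta)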
private def referenceLRNForwardAcrossChannels
(input: Tensor[Double], alpha: Double, beta: Double, size: Int): Tensor[Double] = {
val output = Tensor[Double]()
output.resizeAs(input)
val batch = input.size(1)
val channel = input.size(2)
val height = input.size(3)
val width = input.size(4)
for (n <- 0 until batch) {
for (c <- 0 until channel) {
for (h <- 0 until height) {
for (w <- 0 until width) {
var cStart = c - (size - 1) / 2
val cEnd = math.min(cStart + size, channel)
cStart = math.max(cStart, 0)
var scale = 1.0
for (i <- cStart until cEnd) {
val value = input.valueAt(n + 1, i + 1, h + 1, w + 1)
scale += value * value * alpha / size
}
output.setValue(n + 1, c + 1, h + 1, w + 1,
input.valueAt(n + 1, c + 1, h + 1, w + 1) * math.pow(scale, -beta))
}
}
}
}
output
}
private def referenceLRNForwardAcrossChannels
(input: Tensor[Float], alpha: Float, beta: Float, size: Int): Tensor[Float] = {
val output = Tensor[Float]()
output.resizeAs(input)
val batch = input.size(1)
val channel = input.size(2)
val height = input.size(3)
val width = input.size(4)
for (n <- 0 until batch) {
for (c <- 0 until channel) {
for (h <- 0 until height) {
for (w <- 0 until width) {
var cStart = c - (size - 1) / 2
val cEnd = math.min(cStart + size, channel)
cStart = math.max(cStart, 0)
var scale = 1.0f
for (i <- cStart until cEnd) {
val value = input.valueAt(n + 1, i + 1, h + 1, w + 1)
scale += value * value * alpha / size
}
output.setValue(n + 1, c + 1, h + 1, w + 1,
input.valueAt(n + 1, c + 1, h + 1, w + 1) * math.pow(scale, -beta).toFloat)
}
}
}
}
output
}
"LocalNormalizationAcrossChannels Forward Double" should "be correct" in {
val layer = new SpatialCrossMapLRN[Double](5, 0.0001, 0.75, 1.0)
val input = Tensor[Double](2, 7, 3, 3)
input.rand()
val outputRef = referenceLRNForwardAcrossChannels(input, 0.0001, 0.75, 5)
layer.forward(input)
val output = layer.forward(input)
output should be(outputRef)
}
"LocalNormalizationAcrossChannels Backward Double" should "be correct" in {
val layer = new SpatialCrossMapLRN[Double](5, 0.0001, 0.75, 1.0)
val input = Tensor[Double](2, 7, 3, 3)
input.rand()
val checker = new GradientChecker(1e-2, 1e-2)
checker.checkLayer(layer, input) should be(true)
}
"LocalNormalizationAcrossChannels Backward Float" should "be correct" in {
val layer = new SpatialCrossMapLRN[Float](5, 0.0001, 0.75, 1.0)
val input = Tensor[Float](2, 7, 3, 3)
input.rand()
val checker = new GradientChecker(1e-2, 1e-2)
checker.checkLayer[Float](layer, input) should be(true)
}
"LocalNormalizationAcrossChannels with Large Region Backward Double" should "be correct" in {
val layer = new SpatialCrossMapLRN[Double](15, 0.0001, 0.75, 1.0)
val input = Tensor[Double](2, 7, 3, 3)
input.rand()
val checker = new GradientChecker(1e-2, 1e-2)
checker.checkLayer(layer, input) should be(true)
}
"LocalNormalizationAcrossChannels with Large Region Backward Float" should "be correct" in {
val layer = new SpatialCrossMapLRN[Float](15, 0.0001, 0.75, 1.0)
val input = Tensor[Float](2, 7, 3, 3)
input.rand()
val checker = new GradientChecker(1e-2, 1e-2)
checker.checkLayer(layer, input) should be(true)
}
"LocalNormalizationAcrossChannels with Large Region Forward Double" should "be correct" in {
val layer = new SpatialCrossMapLRN[Double](15, 0.0001, 0.75, 1.0)
val input = Tensor[Double](2, 7, 3, 3)
input.rand()
val outputRef = referenceLRNForwardAcrossChannels(input, 0.0001, 0.75, 15)
val output = layer.forward(input)
output should be(outputRef)
}
"LocalNormalizationAcrossChannels Forward Float" should "be correct" in {
val layer = new SpatialCrossMapLRN[Float](5, 0.0001f, 0.75f, 1.0f)
val input = Tensor[Float](2, 7, 3, 3)
input.rand()
val outputRef = referenceLRNForwardAcrossChannels(input, 0.0001f, 0.75f, 5)
val output = layer.forward(input)
output should be(outputRef)
}
"LocalNormalizationAcrossChannels with Large Region Forward Float" should "be correct" in {
val layer = new SpatialCrossMapLRN[Float](15, 0.0001f, 0.75f, 1.0f)
val input = Tensor[Float](2, 7, 3, 3)
input.rand()
val outputRef = referenceLRNForwardAcrossChannels(input, 0.0001f, 0.75f, 15)
val output = layer.forward(input)
output should be(outputRef)
}
}
|
SeaOfOcean/BigDL
|
dl/src/test/scala/com/intel/analytics/bigdl/nn/SpatialCrossMapLRNSpec.scala
|
Scala
|
apache-2.0
| 5,862 |
/*
* Copyright (c) 2014-2016
* nonblocking.at gmbh [http://www.nonblocking.at]
*
* This file is part of Cliwix.
*
* Cliwix is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package at.nonblocking.cliwix.core.transaction
import at.nonblocking.cliwix.core.CliwixException
private[transaction] class CliwixRollbackException extends CliwixException("Rollback trigger") {
}
|
nonblocking/cliwix
|
cliwix-core/src/main/scala/at/nonblocking/cliwix/core/transaction/CliwixRollbackException.scala
|
Scala
|
agpl-3.0
| 983 |
/* Copyright 2017-19, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.jni
/**
* @author Emmanouil Antonios Platanios
*/
object Session {
TensorFlow.load()
@native def allocate(graphHandle: Long, target: String, configProto: Array[Byte]): Long
@native def delete(handle: Long): Unit
// TODO: [SESSION] "listDevices".
// TODO: [SESSION] Add TPU support using the experimental C API.
/** Executes a computation in a session.
*
* @param handle Handle to the native TensorFlow session object.
* @param runOptions Serialized representation of a `RunOptions` protocol buffer, or `null`.
* @param inputOpHandles See `inputOpIndices`.
* @param inputOpIndices See `inputTensorHandles`.
* @param inputTensorHandles Together with `inputOpHandles` and `inputOpIndices` this array specifies the values
* that are being "fed" (do not need to be computed) during graph execution.
* `inputTensorHandles(i)` (which corresponds to a `Tensor.nativeHandle`) is considered to
* be the `inputOpIndices(i)`-th output of the operation `inputOpHandles(i)`. Thus, it is
* required that
* `inputOpHandles.length == inputOpIndices.length == inputTensorHandles.length`.
    * @param outputOpHandles     See `outputOpIndices`.
    * @param outputOpIndices     Together with `outputOpHandles`, this identifies the set of values that should be
    *                            computed: the `outputOpIndices(i)`-th output of the operation `outputOpHandles(i)`.
    *                            It is required that `outputOpHandles.length == outputOpIndices.length`.
* @param targetOpHandles Set of operations in the graph that are to be executed but whose output will not be
* returned.
* @param wantRunMetadata Boolean variable that indicates whether metadata about this execution should be
* returned.
* @param outputTensorHandles Array that will be filled in with handles to the outputs requested. It is required that
* `outputTensorHandles.length == outputOpHandles.length`.
* @return Serialized representation of the `RunMetadata` protocol buffer, or `null` if `wantRunMetadata` is `false`.
*/
@native def run(
handle: Long,
runOptions: Array[Byte],
inputTensorHandles: Array[Long],
inputOpHandles: Array[Long],
inputOpIndices: Array[Int],
outputOpHandles: Array[Long],
outputOpIndices: Array[Int],
targetOpHandles: Array[Long],
wantRunMetadata: Boolean,
outputTensorHandles: Array[Long]): Array[Byte]
// @native def extend(handle: Long): Unit
@native def deviceList(configProto: Array[Byte]): Array[Array[Byte]]
}
|
eaplatanios/tensorflow_scala
|
modules/jni/src/main/scala/org/platanios/tensorflow/jni/Session.scala
|
Scala
|
apache-2.0
| 3,498 |
object ScalaSkeleton {
def main(args: Array[String]): Unit = {
println("Hello friend.")
}
}
|
FunTimeCoding/scala-skeleton
|
src/ScalaSkeleton.scala
|
Scala
|
mit
| 100 |
package skuber.util
import java.io.InputStreamReader
import java.util
import org.yaml.snakeyaml.constructor.SafeConstructor
import play.api.libs.json.{JsObject, Json}
/**
* @author David O'Riordan
*
* This supports reading of resources represented as Yaml into one or more Json objects.
*/
object YamlReader {
/*
* Convert YAML string to JSON string representation
*/
private def yamlStringToJsonString(yamlStr: String) = {
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
val yamlReader = new ObjectMapper(new YAMLFactory)
val obj = yamlReader.readValue(yamlStr, classOf[Object])
val jsonWriter = new ObjectMapper()
jsonWriter.writeValueAsString(obj)
}
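  /**
   * Reads every YAML document from the given reader, returning each one as a JsObject.
   * Illustrative usage (the file name is only an example):
   *   YamlReader.read(new InputStreamReader(new FileInputStream("resources.yaml")))
   */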
def read(is: InputStreamReader): Seq[JsObject] = {
import org.yaml.snakeyaml.Yaml
import org.yaml.snakeyaml.constructor.SafeConstructor
import scala.collection.JavaConverters._
val yaml = new Yaml(new SafeConstructor)
val yamlDocs = yaml.loadAll(is).iterator.asScala
yamlDocs.map { yamlDoc =>
      val dumper = new Yaml
      val outputYamlStr = dumper.dump(yamlDoc)
val json = yamlStringToJsonString(outputYamlStr)
Json.parse(json).as[JsObject]
}.toSeq
}
}
|
doriordan/skuber-util
|
src/main/scala/skuber/util/YamlReader.scala
|
Scala
|
apache-2.0
| 1,260 |
package org.sisioh.aws4s.s3.model
import java.util.Date
import com.amazonaws.services.s3.model.CompleteMultipartUploadResult
import org.sisioh.aws4s.PimpedType
object CompleteMultipartUploadResultFactory {
def create(): CompleteMultipartUploadResult = new CompleteMultipartUploadResult()
}
class RichCompleteMultipartUploadResult(val underlying: CompleteMultipartUploadResult)
extends AnyVal with PimpedType[CompleteMultipartUploadResult] {
def bucketNameOpt: Option[String] = Option(underlying.getBucketName)
def bucketNameOpt_=(value: Option[String]): Unit =
underlying.setBucketName(value.orNull)
// ---
def keyOpt: Option[String] = Option(underlying.getKey)
def keyOpt_=(value: Option[String]): Unit =
underlying.setKey(value.orNull)
// ---
def locationOpt = Option(underlying.getLocation)
def locationOpt_=(value: Option[String]): Unit =
underlying.setLocation(value.orNull)
// ---
def eTagOpt: Option[String] = Option(underlying.getETag)
def eTagOpt_=(value: Option[String]): Unit =
underlying.setETag(value.orNull)
// ---
def versionIdOpt: Option[String] = Option(underlying.getVersionId)
def versionIdOpt_=(value: Option[String]): Unit =
underlying.setVersionId(value.orNull)
// ---
def expirationTimeOpt: Option[Date] = Option(underlying.getExpirationTime)
def expirationTimeOpt_=(value: Option[Date]): Unit =
underlying.setExpirationTime(value.orNull)
// ---
def expirationTimeRuleIdOpt: Option[String] = Option(underlying.getExpirationTimeRuleId)
def expirationTimeRuleIdOpt_=(value: Option[String]): Unit =
underlying.setExpirationTimeRuleId(value.orNull)
}
|
everpeace/aws4s
|
aws4s-s3/src/main/scala/org/sisioh/aws4s/s3/model/RichCompleteMultipartUploadResult.scala
|
Scala
|
mit
| 1,671 |
package lila.memo
import com.typesafe.config.Config
import lila.db.dsl._
final class Env(config: Config, db: lila.db.Env) {
private val CollectionCache = config getString "collection.cache"
lazy val mongoCache: MongoCache.Builder = MongoCache(db(CollectionCache))
}
object Env {
lazy val current = "memo" boot new Env(
lila.common.PlayApp loadConfig "memo",
lila.db.Env.current)
}
|
clarkerubber/lila
|
modules/memo/src/main/Env.scala
|
Scala
|
agpl-3.0
| 401 |
package suiryc.scala.misc
/* XXX - handle time system (separate for human readable form, or introduce 'cumulative' notion ?) */
/* XXX - handle floating points in human representation ? */
object Units {
case class Unit(label: String, factor: Long)
abstract class AbstractSystem(unityLabel: String) {
val unity = Unit(unityLabel, 1)
    private val ValueRegexp = """^([0-9]*)\s*([a-zA-Z]*)$""".r
def units: List[List[Unit]] = Nil
def fromHumanReadable(value: String): Long = value match {
case ValueRegexp(value, valueUnit) =>
val lcunit = valueUnit.toLowerCase
def get(units: List[Unit]): Option[Long] =
units find { unit =>
unit.label.toLowerCase == lcunit
} map { unit =>
value.toLong * unit.factor
}
if ((lcunit == "") || (lcunit == unityLabel.toLowerCase)) value.toLong
else units.foldLeft(None:Option[Long]) { (result, units) =>
result orElse get(units)
} getOrElse(
throw new IllegalArgumentException(s"Invalid value[$value]")
)
case _ =>
throw new IllegalArgumentException(s"Invalid value[$value]")
}
def toHumanReadable(value: Long, runits: List[Unit] = units.head) = {
@scala.annotation.tailrec
def loop(units: List[Unit]): (Long, Unit) = units match {
case head :: tail =>
if (value < 2 * head.factor) loop(tail)
else (value, head)
case Nil =>
(value, unity)
}
val (hr, unit) = loop(runits.reverse)
s"${hr / unit.factor} ${unit.label}"
}
}
trait SI extends AbstractSystem {
/* Note: 'K' is reserved for 'Kelvin' */
val kilo = Unit(s"k${unity.label}", 1000L)
val mega = Unit(s"M${unity.label}", 1000L * 1000L)
val giga = Unit(s"G${unity.label}", 1000L * 1000L * 1000L)
val tera = Unit(s"T${unity.label}", 1000L * 1000L * 1000L * 1000L)
val peta = Unit(s"P${unity.label}", 1000L * 1000L * 1000L * 1000L * 1000L)
def units_SI = List(kilo, mega, giga, tera, peta)
override def units = super.units :+ units_SI
}
trait Binary extends AbstractSystem {
val kibi = Unit(s"Ki${unity.label}", 1024L)
val mebi = Unit(s"Mi${unity.label}", 1024L * 1024L)
val gibi = Unit(s"Gi${unity.label}", 1024L * 1024L * 1024L)
val tebi = Unit(s"Ti${unity.label}", 1024L * 1024L * 1024L * 1024L)
val pebi = Unit(s"Pi${unity.label}", 1024L * 1024L * 1024L * 1024L * 1024L)
def units_Binary = List(kibi, mebi, gibi, tebi, pebi)
override def units = super.units :+ units_Binary
}
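  /**
   * Storage units with "B" (byte) as unity, combining binary and SI prefixes.
   * Illustrative examples:
   *   Units.storage.toHumanReadable(3L * 1024 * 1024) == "3 MiB"
   *   Units.storage.fromHumanReadable("2 KiB") == 2048L
   */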
object storage
extends AbstractSystem("B")
with Binary
with SI
}
|
swhgoon/suiryc-scala
|
core/src/main/scala/suiryc/scala/misc/Units.scala
|
Scala
|
gpl-3.0
| 2,680 |
package org.jetbrains.plugins.scala
package debugger.evaluation
import com.intellij.debugger.codeinsight.RuntimeTypeEvaluator
import com.intellij.debugger.engine.ContextUtil
import com.intellij.debugger.engine.evaluation.{CodeFragmentKind, TextWithImportsImpl, EvaluationContextImpl}
import com.intellij.debugger.engine.evaluation.expression.ExpressionEvaluator
import com.intellij.debugger.impl.DebuggerContextImpl
import com.intellij.debugger.{DebuggerBundle, DebuggerInvocationUtil, EvaluatingComputable}
import com.intellij.openapi.application.{AccessToken, ReadAction}
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.progress.ProgressIndicator
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.Key
import com.intellij.psi._
import com.intellij.psi.search.GlobalSearchScope
import com.sun.jdi.{ClassType, Type, Value}
import org.jetbrains.annotations.Nullable
import org.jetbrains.plugins.scala.debugger.evaluation.ScalaRuntimeTypeEvaluator._
import org.jetbrains.plugins.scala.debugger.evaluation.util.DebuggerUtil
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScExpression
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScModifierListOwner
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScObject
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiManager
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.psi.types.ScType.ExtractClass
/**
* Nikolay.Tropin
* 8/8/13
*/
abstract class ScalaRuntimeTypeEvaluator(@Nullable editor: Editor, expression: PsiElement, context: DebuggerContextImpl, indicator: ProgressIndicator)
extends RuntimeTypeEvaluator(editor, expression, context, indicator) {
override def evaluate(evaluationContext: EvaluationContextImpl): PsiClass = {
val project: Project = evaluationContext.getProject
val evaluator: ExpressionEvaluator = DebuggerInvocationUtil.commitAndRunReadAction(project, new EvaluatingComputable[ExpressionEvaluator] {
def compute: ExpressionEvaluator = {
val textWithImports = new TextWithImportsImpl(CodeFragmentKind.CODE_BLOCK, expression.getText)
val codeFragment = new ScalaCodeFragmentFactory().createCodeFragment(textWithImports, expression, project)
ScalaEvaluatorBuilder.build(codeFragment, ContextUtil.getSourcePosition(evaluationContext))
}
})
val value: Value = evaluator.evaluate(evaluationContext)
if (value != null) {
getCastableRuntimeType(project, value)
} else throw EvaluationException(DebuggerBundle.message("evaluation.error.surrounded.expression.null"))
}
}
object ScalaRuntimeTypeEvaluator {
val KEY: Key[ScExpression => ScType] = Key.create("SCALA_RUNTIME_TYPE_EVALUATOR")
def getCastableRuntimeType(project: Project, value: Value): PsiClass = {
val unwrapped = DebuggerUtil.unwrapScalaRuntimeObjectRef(value)
val jdiType: Type = unwrapped.asInstanceOf[Value].`type`
var psiClass: PsiClass = findPsiClass(project, jdiType)
if (psiClass != null) {
return psiClass
}
jdiType match {
case classType: ClassType =>
val superclass: ClassType = classType.superclass
val stdTypeNames = Seq("java.lang.Object", "scala.Any", "scala.AnyRef", "scala.AnyVal")
if (superclass != null && !stdTypeNames.contains(superclass.name)) {
psiClass = findPsiClass(project, superclass)
if (psiClass != null) {
return psiClass
}
}
import scala.collection.JavaConversions._
classType.interfaces.map(findPsiClass(project, _)).find(_ != null).orNull
case _ => null
}
}
private def findPsiClass(project: Project, jdiType: Type): PsiClass = {
val token: AccessToken = ReadAction.start
try {
new ScalaPsiManager(project).getCachedClass(GlobalSearchScope.allScope(project), jdiType.name())
}
finally {
token.finish()
}
}
def isSubtypeable(scType: ScType): Boolean = {
scType match {
case ExtractClass(psiClass) =>
psiClass match {
case _: ScObject => false
case owner: ScModifierListOwner => !owner.hasFinalModifier
case _ if scType.isInstanceOf[PsiPrimitiveType] => false
case _ => !psiClass.hasModifierProperty(PsiModifier.FINAL)
}
case _ => false
}
}
}
|
triggerNZ/intellij-scala
|
src/org/jetbrains/plugins/scala/debugger/evaluation/ScalaRuntimeTypeEvaluator.scala
|
Scala
|
apache-2.0
| 4,379 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.audit.handler
import org.slf4j.{Logger, LoggerFactory}
import play.api.libs.json.JsValue
import uk.gov.hmrc.audit.HandlerResult
import scala.concurrent.{ExecutionContext, Future}
class LoggingHandler(logger: Logger) extends AuditHandler {
private val ErrorKey = "DS_EventMissed_AuditRequestFailure"
def makeFailureMessage(event: JsValue): String =
s"$ErrorKey : audit item : ${event.toString}"
override def sendEvent(event: JsValue)(implicit ec: ExecutionContext): Future[HandlerResult] = {
val message = makeFailureMessage(event)
logger.warn(message)
Future.successful(HandlerResult.Success)
}
}
object LoggingHandler extends LoggingHandler(LoggerFactory.getLogger(getClass))
|
hmrc/play-auditing
|
src-common/main/scala/uk/gov/hmrc/audit/handler/LoggingHandler.scala
|
Scala
|
apache-2.0
| 1,333 |
import Stream._
sealed trait Stream[+A] {
def headOption: Option[A] = this match {
case Empty => None
case Cons(h, t) => Some(h())
}
// EX1
def toList: List[A] = this match {
case Cons(h, t) => h() :: t().toList
case Empty => Nil
}
// EX2
def take(n: Int): Stream[A] = {
def go(s: Stream[A], _n: Int): Stream[A] = s match {
case _ if _n <= 0 => Empty
case Cons(h, t) => cons(h(), go(t(), _n-1))
case Empty => Empty
}
go(this, n)
}
def drop(n: Int): Stream[A] = this match {
case Empty => Empty
case c if n <= 0 => c
case Cons(h, t) => t().drop(n-1)
}
// EX3
def takeWhile(p: A => Boolean): Stream[A] = this match {
case Cons(h, t) => if(p(h())) cons(h(), t().takeWhile(p))
else Empty
case Empty => Empty
}
def exists(p: A => Boolean): Boolean = this match {
case Cons(h, t) => p(h()) || t().exists(p)
case _ => false
}
def foldRight[B](z: => B)(f: (A, => B) => B): B =
this match {
case Cons(h, t) => f(h(), t().foldRight(z)(f))
case _ => z
}
def exists2(p: A => Boolean): Boolean =
foldRight(false)((a, b) => p(a) || b)
// EX4
def forAll(p: A => Boolean): Boolean = this match {
case Cons(h, t) => p(h()) && t().forAll(p)
case _ => true
}
def forAll2(p: A => Boolean): Boolean =
foldRight(true)((a, b) => p(a) && b)
// EX5
def takeWhile2(p: A => Boolean): Stream[A] =
foldRight(empty[A])((a,b) => if (p(a)) cons(a, b) else empty)
// EX6
def headOption2: Option[A] =
foldRight(None: Option[A])((h,_) => Some(h))
// EX7
def map[B](f: A => B): Stream[B] =
foldRight(empty[B])((a,b) => cons(f(a), b))
def filter(p: A => Boolean): Stream[A] =
foldRight(empty[A])((a,b) =>
if(p(a)) cons(a, b)
else b)
def append[B>:A](s: => Stream[B]): Stream[B] =
foldRight(s)(cons(_,_))
def flatMap[B](f: A => Stream[B]): Stream[B] =
foldRight(empty[B])((a,b) => f(a) append b)
def find(p: A => Boolean): Option[A] =
filter(p).headOption
// EX13
def map2[B](f: A => B): Stream[B] =
unfold(this) {
case Cons(h, t) => Some((f(h()), t()))
case Empty => None
}
def take2(n: Int): Stream[A] =
unfold((this,n)) {
case (Cons(h, _), 1) => Some((h(), (empty[A], 0)))
case (Cons(h, t), n) if n > 1 => Some((h(), (t(), n-1)))
case _ => None
}
def takeWhile3(p: A => Boolean): Stream[A] =
unfold(this) {
case Cons(h, t) => if(p(h())) Some((h(), t()))
else None
case _ => None
}
def zipWith[B,C](s2: Stream[B])(f: (A,B) => C): Stream[C] =
unfold((this, s2)) {
case (Empty, _) => None
case (_, Empty) => None
case (Cons(h1, t1), Cons(h2,t2)) => Some((f(h1(),h2()), (t1(),t2())))
}
def zipAll[B](s2: Stream[B]): Stream[(Option[A],Option[B])] =
unfold((this, s2)) {
case (Empty, Empty) => None
case (Empty, Cons(h2,t2)) => Some(((None,Some(h2())),(empty[A], t2())))
case (Cons(h1,t1), Empty) => Some(((Some(h1()), None),(t1(), empty[B])))
case (Cons(h1,t1), Cons(h2,t2)) =>
Some(((Some(h1()),Some(h2())),(t1(),t2())))
}
// EX14
def startsWith[A](s: Stream[A]): Boolean =
zipAll(s).takeWhile(!_._2.isEmpty) forAll {
case (h,h2) => h == h2
}
// EX15
def tails: Stream[Stream[A]] =
unfold(this) {
case Empty => None
case Cons(h,t) => Some((Cons(h,t), t()))
} append Stream(empty)
def hasSubsequence[A](s: Stream[A]): Boolean =
tails exists (_ startsWith s)
// EX16
  def scanRight[B](z: B)(f: (A,B) => B): Stream[B] =
    foldRight((z, Stream(z))) { (a, acc) =>
      // cache the by-name accumulator so it is only evaluated once
      lazy val cached = acc
      val b = f(a, cached._1)
      (b, cons(b, cached._2))
    }._2
}
println(Stream(1,2,3).tails.map(_.toList).toList)
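// illustrative check of the corrected scanRight above (kept commented out, like the other examples):
//println(Stream(1,2,3).scanRight(0)(_ + _).toList) // List(6, 5, 3, 0)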
//println(Stream(1,2,3).zipAll(Stream(1)).toList)
//println(Stream(1,2,4,4).zipWith(Stream(1,2,4,4))(_ + _).toList)
//println(Stream(1,2,3).toList)
//println(Stream(1,2,3).take(2).toList)
//println(Stream(1,2,3).take2(2).toList)
//println(Stream(1,2,3,4).drop(2).toList)
//println(Stream(1,2,3,4).takeWhile(_ < 3).toList)
//println(Stream(1,2,3,4).takeWhile3(_ <= 3).toList)
//println(Stream(1,2,3,4).forAll(_ < 5))
//println(Stream(1,2,3,4).forAll2(_ < 5))
//println(Stream(1,2,3,4).takeWhile2(_ < 3).toList)
//println(Stream(1,2,3,4).headOption2)
//println(Empty.headOption2)
//println(Stream(1,2,3,4).map(_ + 1).toList)
//println(Stream(1,2,3,4).map2(_ + 1).toList)
//println(Stream(1,2,3,4).filter(_ % 2 == 0).toList)
//println(Stream(1,2,3,4).append(Stream(1,2,3)).toList)
case object Empty extends Stream[Nothing]
case class Cons[+A](h: () => A, t: () => Stream[A]) extends Stream[A]
object Stream {
def cons[A](hd: => A, tl: => Stream[A]): Stream[A] = {
lazy val head = hd
lazy val tail = tl
Cons(() => head, () => tail)
}
def empty[A]: Stream[A] = Empty
def apply[A](as: A*): Stream[A] =
if(as.isEmpty) empty else cons(as.head, apply(as.tail: _*))
// EX8
def constant[A](a: A): Stream[A] = {
lazy val ct:Stream[A] = Cons(() => a, () => ct)
ct
}
// EX9
def from(n: Int): Stream[Int] = {
var _n: Int = n
lazy val n_strm: Stream[Int] = Cons(
() => _n, () => {_n +=1; n_strm})
n_strm
}
def from2(n: Int): Stream[Int] =
cons(n, from(n+1))
// EX10
def fibs: Stream[Int] = {
def go(n: Int, n1: Int): Stream[Int] =
cons(n, go(n1, n+n1))
go(0,1)
}
// EX11
def unfold[A, S](z: S)(f: S => Option[(A, S)]): Stream[A] = f(z) match {
case Some((h, s)) => cons(h, unfold(s)(f))
case None => empty[A]
}
// EX12
def fibs2: Stream[Int] =
unfold((0,1)) { case (f0, f1) => Some((f0,(f1,f0+f1))) }
//def from3(n: Int): Stream[Int] =
// unfold(cons(n, empty[Int]))((s:Stream[Int]) => Some(n+1, s))
def from3(n: Int) =
unfold(n)(n => Some((n, n+1)))
//def constant2[A](a: A): Stream[A] =
// unfold(cons(a, empty[A]))((s:Stream[A]) => Some(a, s))
def constant2[A](a: A) =
unfold(a)(_ => Some((a,a)))
//def ones2: Stream[Int] =
// unfold(cons(1, empty[Int]))((s:Stream[Int]) => Some(1, s))
def ones2: Stream[Int] =
unfold(1)(_ => Some((1,1)))
}
//println(constant(2).take(5).toList)
//println(from(2).take(5).toList)
//println(fibs.take(7).toList)
//println(fibs2.take(7).toList)
//println(constant2(2).take(5).toList)
//println(from3(2).take(5).toList)
//println(ones2.take(5).toList)
|
marcos-sb/scala-exercises
|
ch5-stream.scala
|
Scala
|
gpl-2.0
| 6,451 |
package hammock
package hi
import java.time.ZonedDateTime
import cats._
import cats.implicits._
import hammock.hi.Cookie.SameSite
import hammock.hi.platformspecific._
import monocle.Optional
import monocle.macros.Lenses
@Lenses case class Cookie(
name: String,
value: String,
expires: Option[ZonedDateTime] = None,
maxAge: Option[Int] = None,
domain: Option[String] = None,
path: Option[String] = None,
secure: Option[Boolean] = None,
httpOnly: Option[Boolean] = None,
sameSite: Option[SameSite] = None,
custom: Option[Map[String, String]] = None
)
object Cookie {
val expiresOpt: Optional[Cookie, ZonedDateTime] = Optional[Cookie, ZonedDateTime] {
_.expires
} { date =>
{
case cookie @ Cookie(_, _, None, _, _, _, _, _, _, _) => cookie
case cookie @ _ => cookie.copy(expires = Some(date))
}
}
val maxAgeOpt: Optional[Cookie, Int] = Optional[Cookie, Int] {
_.maxAge
} { age =>
{
case cookie @ Cookie(_, _, _, None, _, _, _, _, _, _) => cookie
case cookie @ _ => cookie.copy(maxAge = Some(age))
}
}
val domainOpt: Optional[Cookie, String] = Optional[Cookie, String] {
_.domain
} { domain =>
{
case cookie @ Cookie(_, _, _, _, None, _, _, _, _, _) => cookie
case cookie @ _ => cookie.copy(domain = Some(domain))
}
}
val pathOpt: Optional[Cookie, String] = Optional[Cookie, String] {
_.path
} { path =>
{
case cookie @ Cookie(_, _, _, _, _, None, _, _, _, _) => cookie
case cookie @ _ => cookie.copy(path = Some(path))
}
}
val secureOpt: Optional[Cookie, Boolean] = Optional[Cookie, Boolean] {
_.secure
} { secure =>
{
case cookie @ Cookie(_, _, _, _, _, _, None, _, _, _) => cookie
case cookie @ _ => cookie.copy(secure = Some(secure))
}
}
val httpOnlyOpt: Optional[Cookie, Boolean] = Optional[Cookie, Boolean] {
_.httpOnly
} { httpOnly =>
{
case cookie @ Cookie(_, _, _, _, _, _, _, None, _, _) => cookie
case cookie @ _ => cookie.copy(httpOnly = Some(httpOnly))
}
}
val sameSiteOpt: Optional[Cookie, SameSite] = Optional[Cookie, SameSite] {
_.sameSite
} { sameSite =>
{
case cookie @ Cookie(_, _, _, _, _, _, _, _, None, _) => cookie
case cookie @ _ => cookie.copy(sameSite = Some(sameSite))
}
}
val customOpt: Optional[Cookie, Map[String, String]] = Optional[Cookie, Map[String, String]] {
_.custom
} { custom =>
{
case cookie @ Cookie(_, _, _, _, _, _, _, _, _, None) => cookie
case cookie @ _ => cookie.copy(custom = Some(custom))
}
}
sealed trait SameSite
object SameSite {
case object Strict extends SameSite
case object Lax extends SameSite
implicit val sameSiteShow = new Show[SameSite] {
def show(s: SameSite): String = s match {
case Strict => "Strict"
case Lax => "Lax"
}
}
implicit val sameSiteEq = new Eq[SameSite] {
def eqv(a: SameSite, b: SameSite): Boolean = (a, b) match {
case (Strict, Strict) => true
case (Lax, Lax) => true
case _ => false
}
}
}
implicit val cookieShow = new Show[Cookie] {
def show(cookie: Cookie): String = render(cookie)
}
implicit val cookieEq: Eq[Cookie] = new Eq[Cookie] {
def eqv(a: Cookie, b: Cookie): Boolean = {
a.name === b.name &&
a.value === b.value &&
a.expires.equals(b.expires) &&
a.maxAge === b.maxAge &&
a.domain === b.domain &&
a.path === b.path &&
a.secure === b.secure &&
a.httpOnly === b.httpOnly &&
a.sameSite === b.sameSite &&
a.custom === b.custom
}
}
/**
* renders a cookie in the Cookie header format
* see: https://tools.ietf.org/html/rfc6265#section-5.4
*/
def render(cookie: Cookie)(implicit fmt: DateFormatter): String = {
def renderPair[S: Show](k: String)(v: S) = k ++ "=" ++ Show[S].show(v)
def maybeShowDate(date: Option[ZonedDateTime]): Option[String] = date map (date => fmt.format(date))
def expires = maybeShowDate(cookie.expires) map renderPair("Expires")
def maxAge = cookie.maxAge map renderPair("MaxAge")
def domain = cookie.domain map renderPair("Domain")
def path = cookie.path map renderPair("Path")
def secure = cookie.secure map renderPair("Secure")
def httpOnly = cookie.httpOnly map renderPair("HttpOnly")
def sameSite = cookie.sameSite map renderPair("SameSite")
val maybes = List(expires, maxAge, domain, path, secure, httpOnly, sameSite)
.filter(_.nonEmpty)
.map(_.get)
val custom: List[String] = cookie.custom match {
case None => Nil
case Some(elems) => elems.map { case (k, v) => renderPair(k)(v) } toList
}
(s"${renderPair(cookie.name)(cookie.value)}" :: maybes ::: custom).mkString("; ")
}
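  // Illustrative example (assumes an implicit DateFormatter is in scope):
  //   render(Cookie("session", "abc123", path = Some("/"), httpOnly = Some(true)))
  //     == "session=abc123; Path=/; HttpOnly=true"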
}
|
pepegar/hammock
|
core/src/main/scala/hammock/hi/Cookie.scala
|
Scala
|
mit
| 5,565 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.util.Locale
import org.apache.spark.sql.catalyst.analysis.{TypeCheckResult, UnresolvedException}
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult.{TypeCheckFailure, TypeCheckSuccess}
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateFunction, DeclarativeAggregate, NoOp}
import org.apache.spark.sql.types._
/**
* The trait of the Window Specification (specified in the OVER clause or WINDOW clause) for
* Window Functions.
*/
sealed trait WindowSpec
/**
* The specification for a window function.
*
 * @param partitionSpec Defines how input rows are partitioned.
 * @param orderSpec Defines the ordering of rows within a partition.
 * @param frameSpecification Defines the window frame within a partition.
*/
case class WindowSpecDefinition(
partitionSpec: Seq[Expression],
orderSpec: Seq[SortOrder],
frameSpecification: WindowFrame) extends Expression with WindowSpec with Unevaluable {
override def children: Seq[Expression] = partitionSpec ++ orderSpec :+ frameSpecification
override lazy val resolved: Boolean =
childrenResolved && checkInputDataTypes().isSuccess &&
frameSpecification.isInstanceOf[SpecifiedWindowFrame]
override def nullable: Boolean = true
override def foldable: Boolean = false
override def dataType: DataType = throw new UnsupportedOperationException("dataType")
override def checkInputDataTypes(): TypeCheckResult = {
frameSpecification match {
case UnspecifiedFrame =>
TypeCheckFailure(
"Cannot use an UnspecifiedFrame. This should have been converted during analysis. " +
"Please file a bug report.")
case f: SpecifiedWindowFrame if f.frameType == RangeFrame && !f.isUnbounded &&
orderSpec.isEmpty =>
TypeCheckFailure(
"A range window frame cannot be used in an unordered window specification.")
case f: SpecifiedWindowFrame if f.frameType == RangeFrame && f.isValueBound &&
orderSpec.size > 1 =>
TypeCheckFailure(
s"A range window frame with value boundaries cannot be used in a window specification " +
s"with multiple order by expressions: ${orderSpec.mkString(",")}")
case f: SpecifiedWindowFrame if f.frameType == RangeFrame && f.isValueBound &&
!isValidFrameType(f.valueBoundary.head.dataType) =>
TypeCheckFailure(
s"The data type '${orderSpec.head.dataType.catalogString}' used in the order " +
"specification does not match the data type " +
s"'${f.valueBoundary.head.dataType.catalogString}' which is used in the range frame.")
case _ => TypeCheckSuccess
}
}
override def sql: String = {
def toSql(exprs: Seq[Expression], prefix: String): Seq[String] = {
Seq(exprs).filter(_.nonEmpty).map(_.map(_.sql).mkString(prefix, ", ", ""))
}
val elements =
toSql(partitionSpec, "PARTITION BY ") ++
toSql(orderSpec, "ORDER BY ") ++
Seq(frameSpecification.sql)
elements.mkString("(", " ", ")")
}
private def isValidFrameType(ft: DataType): Boolean = (orderSpec.head.dataType, ft) match {
case (DateType, IntegerType) => true
case (TimestampType, CalendarIntervalType) => true
case (a, b) => a == b
}
}
/**
* A Window specification reference that refers to the [[WindowSpecDefinition]] defined
* under the name `name`.
*/
case class WindowSpecReference(name: String) extends WindowSpec
/**
* The trait used to represent the type of a Window Frame.
*/
sealed trait FrameType {
def inputType: AbstractDataType
def sql: String
}
/**
* RowFrame treats rows in a partition individually. Values used in a row frame are considered
* to be physical offsets.
 * For example, `ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING` represents a 3-row frame,
* from the row that precedes the current row to the row that follows the current row.
*/
case object RowFrame extends FrameType {
override def inputType: AbstractDataType = IntegerType
override def sql: String = "ROWS"
}
/**
* RangeFrame treats rows in a partition as groups of peers. All rows having the same `ORDER BY`
* ordering are considered as peers. Values used in a range frame are considered to be logical
* offsets.
* For example, assuming the value of the current row's `ORDER BY` expression `expr` is `v`,
* `RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING` represents a frame containing rows whose values
* `expr` are in the range of [v-1, v+1].
*
* If `ORDER BY` clause is not defined, all rows in the partition are considered as peers
* of the current row.
*/
case object RangeFrame extends FrameType {
override def inputType: AbstractDataType = TypeCollection.NumericAndInterval
override def sql: String = "RANGE"
}
/**
* The trait used to represent special boundaries used in a window frame.
*/
sealed trait SpecialFrameBoundary extends Expression with Unevaluable {
override def children: Seq[Expression] = Nil
override def dataType: DataType = NullType
override def foldable: Boolean = false
override def nullable: Boolean = false
}
/** UNBOUNDED boundary. */
case object UnboundedPreceding extends SpecialFrameBoundary {
override def sql: String = "UNBOUNDED PRECEDING"
}
case object UnboundedFollowing extends SpecialFrameBoundary {
override def sql: String = "UNBOUNDED FOLLOWING"
}
/** CURRENT ROW boundary. */
case object CurrentRow extends SpecialFrameBoundary {
override def sql: String = "CURRENT ROW"
}
/**
* Represents a window frame.
*/
sealed trait WindowFrame extends Expression with Unevaluable {
override def children: Seq[Expression] = Nil
override def dataType: DataType = throw new UnsupportedOperationException("dataType")
override def foldable: Boolean = false
override def nullable: Boolean = false
}
/** Used as a placeholder when a frame specification is not defined. */
case object UnspecifiedFrame extends WindowFrame
/**
 * A specified Window Frame. The values `lower`/`upper` can be either a foldable [[Expression]] or a
* [[SpecialFrameBoundary]].
*/
case class SpecifiedWindowFrame(
frameType: FrameType,
lower: Expression,
upper: Expression)
extends WindowFrame {
override def children: Seq[Expression] = lower :: upper :: Nil
lazy val valueBoundary: Seq[Expression] =
children.filterNot(_.isInstanceOf[SpecialFrameBoundary])
override def checkInputDataTypes(): TypeCheckResult = {
// Check lower value.
val lowerCheck = checkBoundary(lower, "lower")
if (lowerCheck.isFailure) {
return lowerCheck
}
// Check upper value.
val upperCheck = checkBoundary(upper, "upper")
if (upperCheck.isFailure) {
return upperCheck
}
// Check combination (of expressions).
(lower, upper) match {
case (l: Expression, u: Expression) if !isValidFrameBoundary(l, u) =>
TypeCheckFailure(s"Window frame upper bound '$upper' does not follow the lower bound " +
s"'$lower'.")
case (l: SpecialFrameBoundary, _) => TypeCheckSuccess
case (_, u: SpecialFrameBoundary) => TypeCheckSuccess
case (l: Expression, u: Expression) if l.dataType != u.dataType =>
TypeCheckFailure(
s"Window frame bounds '$lower' and '$upper' do no not have the same data type: " +
s"'${l.dataType.catalogString}' <> '${u.dataType.catalogString}'")
case (l: Expression, u: Expression) if isGreaterThan(l, u) =>
TypeCheckFailure(
"The lower bound of a window frame must be less than or equal to the upper bound")
case _ => TypeCheckSuccess
}
}
override def sql: String = {
val lowerSql = boundarySql(lower)
val upperSql = boundarySql(upper)
s"${frameType.sql} BETWEEN $lowerSql AND $upperSql"
}
def isUnbounded: Boolean = lower == UnboundedPreceding && upper == UnboundedFollowing
def isValueBound: Boolean = valueBoundary.nonEmpty
def isOffset: Boolean = (lower, upper) match {
case (l: Expression, u: Expression) => frameType == RowFrame && l == u
case _ => false
}
private def boundarySql(expr: Expression): String = expr match {
case e: SpecialFrameBoundary => e.sql
case UnaryMinus(n) => n.sql + " PRECEDING"
case e: Expression => e.sql + " FOLLOWING"
}
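  // For example (illustrative), SpecifiedWindowFrame(RowFrame, UnaryMinus(Literal(1)), CurrentRow).sql
  // renders as "ROWS BETWEEN 1 PRECEDING AND CURRENT ROW".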
// Check whether the left boundary value is greater than the right boundary value. It's required
// that the both expressions have the same data type.
// Since CalendarIntervalType is not comparable, we only compare expressions that are AtomicType.
private def isGreaterThan(l: Expression, r: Expression): Boolean = l.dataType match {
case _: AtomicType => GreaterThan(l, r).eval().asInstanceOf[Boolean]
case _ => false
}
private def checkBoundary(b: Expression, location: String): TypeCheckResult = b match {
case _: SpecialFrameBoundary => TypeCheckSuccess
case e: Expression if !e.foldable =>
TypeCheckFailure(s"Window frame $location bound '$e' is not a literal.")
case e: Expression if !frameType.inputType.acceptsType(e.dataType) =>
TypeCheckFailure(
s"The data type of the $location bound '${e.dataType.catalogString}' does not match " +
s"the expected data type '${frameType.inputType.simpleString}'.")
case _ => TypeCheckSuccess
}
private def isValidFrameBoundary(l: Expression, u: Expression): Boolean = {
(l, u) match {
case (UnboundedFollowing, _) => false
case (_, UnboundedPreceding) => false
case _ => true
}
}
}
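// Illustrative sketch, not part of the original source: how the SQL frame
// "ROWS BETWEEN 1 PRECEDING AND CURRENT ROW" could be modelled with this class, assuming
// RowFrame.sql renders as "ROWS".
//
//   val frame = SpecifiedWindowFrame(RowFrame, UnaryMinus(Literal(1)), CurrentRow)
//   frame.sql // "ROWS BETWEEN 1 PRECEDING AND CURRENT ROW"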
case class UnresolvedWindowExpression(
child: Expression,
windowSpec: WindowSpecReference) extends UnaryExpression with Unevaluable {
override def dataType: DataType = throw new UnresolvedException(this, "dataType")
override def foldable: Boolean = throw new UnresolvedException(this, "foldable")
override def nullable: Boolean = throw new UnresolvedException(this, "nullable")
override lazy val resolved = false
}
case class WindowExpression(
windowFunction: Expression,
windowSpec: WindowSpecDefinition) extends Expression with Unevaluable {
override def children: Seq[Expression] = windowFunction :: windowSpec :: Nil
override def dataType: DataType = windowFunction.dataType
override def foldable: Boolean = windowFunction.foldable
override def nullable: Boolean = windowFunction.nullable
override def toString: String = s"$windowFunction $windowSpec"
override def sql: String = windowFunction.sql + " OVER " + windowSpec.sql
}
/**
* A window function is a function that can only be evaluated in the context of a window operator.
*/
trait WindowFunction extends Expression {
/** Frame in which the window operator must be executed. */
def frame: WindowFrame = UnspecifiedFrame
}
/**
* Case objects that describe whether a window function is a SQL window function or a Python
* user-defined window function.
*/
sealed trait WindowFunctionType
object WindowFunctionType {
case object SQL extends WindowFunctionType
case object Python extends WindowFunctionType
def functionType(windowExpression: NamedExpression): WindowFunctionType = {
val t = windowExpression.collectFirst {
case _: WindowFunction | _: AggregateFunction => SQL
case udf: PythonUDF if PythonUDF.isWindowPandasUDF(udf) => Python
}
// Normally a window expression would either have a SQL window function, a SQL
// aggregate function or a python window UDF. However, sometimes the optimizer will replace
// the window function if the value of the window function can be predetermined.
// For example, for query:
//
// select count(NULL) over () from values 1.0, 2.0, 3.0 T(a)
//
// The window function will be replaced by expression literal(0)
// To handle this case, if a window expression doesn't have a regular window function, we
// consider its type to be SQL as literal(0) is also a SQL expression.
t.getOrElse(SQL)
}
}
/**
* An offset window function is a window function that returns the value of the input column offset
 * by a number of rows within the partition. For instance, an OffsetWindowFunction for value x with
 * offset -2 will get the value of x two rows back in the partition.
*/
abstract class OffsetWindowFunction
extends Expression with WindowFunction with Unevaluable with ImplicitCastInputTypes {
/**
 * Input expression to evaluate against a row that lies a number of rows below or above (depending
 * on the value and sign of the offset) the current row.
*/
val input: Expression
/**
* Default result value for the function when the `offset`th row does not exist.
*/
val default: Expression
/**
* (Foldable) expression that contains the number of rows between the current row and the row
* where the input expression is evaluated.
*/
val offset: Expression
/**
* Direction of the number of rows between the current row and the row where the input expression
* is evaluated.
*/
val direction: SortDirection
override def children: Seq[Expression] = Seq(input, offset, default)
/*
* The result of an OffsetWindowFunction is dependent on the frame in which the
* OffsetWindowFunction is executed, the input expression and the default expression. Even when
* both the input and the default expression are foldable, the result is still not foldable due to
* the frame.
*/
override def foldable: Boolean = false
override def nullable: Boolean = default == null || default.nullable || input.nullable
override lazy val frame: WindowFrame = {
val boundary = direction match {
case Ascending => offset
case Descending => UnaryMinus(offset) match {
case e: Expression if e.foldable => Literal.create(e.eval(EmptyRow), e.dataType)
case o => o
}
}
SpecifiedWindowFrame(RowFrame, boundary, boundary)
}
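  // For example (illustrative, not from the original source): Lag has direction Descending, so an
  // offset of Literal(1) folds to Literal(-1) and the frame addresses the single row one position
  // before the current row; Lead (Ascending) uses the offset as-is.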
override def checkInputDataTypes(): TypeCheckResult = {
val check = super.checkInputDataTypes()
if (check.isFailure) {
check
} else if (!offset.foldable) {
TypeCheckFailure(s"Offset expression '$offset' must be a literal.")
} else {
TypeCheckSuccess
}
}
override def dataType: DataType = input.dataType
override def inputTypes: Seq[AbstractDataType] =
Seq(AnyDataType, IntegerType, TypeCollection(input.dataType, NullType))
override def toString: String = s"$prettyName($input, $offset, $default)"
}
/**
* The Lead function returns the value of `input` at the `offset`th row after the current row in
 * the window. Offsets start at 0, which is the current row. The offset must be a constant
 * integer value. The default offset is 1. When the value of `input` is null at the `offset`th row,
* null is returned. If there is no such offset row, the `default` expression is evaluated.
*
* @param input expression to evaluate `offset` rows after the current row.
* @param offset rows to jump ahead in the partition.
* @param default to use when the offset is larger than the window. The default value is null.
*/
@ExpressionDescription(
usage = """
_FUNC_(input[, offset[, default]]) - Returns the value of `input` at the `offset`th row
after the current row in the window. The default value of `offset` is 1 and the default
value of `default` is null. If the value of `input` at the `offset`th row is null,
    null is returned. If there is no such offset row (e.g., when the offset is 1, the last
row of the window does not have any subsequent row), `default` is returned.
""")
case class Lead(input: Expression, offset: Expression, default: Expression)
extends OffsetWindowFunction {
def this(input: Expression, offset: Expression) = this(input, offset, Literal(null))
def this(input: Expression) = this(input, Literal(1))
def this() = this(Literal(null))
override val direction = Ascending
}
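// Illustrative SQL usage, a sketch that is not part of the original file (the table and column
// names below are hypothetical):
//
//   SELECT name, lead(salary, 1, 0) OVER (PARTITION BY dept ORDER BY salary) AS next_salary
//   FROM employees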
/**
* The Lag function returns the value of `input` at the `offset`th row before the current row in
 * the window. Offsets start at 0, which is the current row. The offset must be a constant
 * integer value. The default offset is 1. When the value of `input` is null at the `offset`th row,
* null is returned. If there is no such offset row, the `default` expression is evaluated.
*
* @param input expression to evaluate `offset` rows before the current row.
* @param offset rows to jump back in the partition.
* @param default to use when the offset row does not exist.
*/
@ExpressionDescription(
usage = """
_FUNC_(input[, offset[, default]]) - Returns the value of `input` at the `offset`th row
before the current row in the window. The default value of `offset` is 1 and the default
value of `default` is null. If the value of `input` at the `offset`th row is null,
null is returned. If there is no such offset row (e.g., when the offset is 1, the first
row of the window does not have any previous row), `default` is returned.
""")
case class Lag(input: Expression, offset: Expression, default: Expression)
extends OffsetWindowFunction {
def this(input: Expression, offset: Expression) = this(input, offset, Literal(null))
def this(input: Expression) = this(input, Literal(1))
def this() = this(Literal(null))
override val direction = Descending
}
abstract class AggregateWindowFunction extends DeclarativeAggregate with WindowFunction {
self: Product =>
override val frame = SpecifiedWindowFrame(RowFrame, UnboundedPreceding, CurrentRow)
override def dataType: DataType = IntegerType
override def nullable: Boolean = true
override lazy val mergeExpressions =
throw new UnsupportedOperationException("Window Functions do not support merging.")
}
abstract class RowNumberLike extends AggregateWindowFunction {
override def children: Seq[Expression] = Nil
protected val zero = Literal(0)
protected val one = Literal(1)
protected val rowNumber = AttributeReference("rowNumber", IntegerType, nullable = false)()
override val aggBufferAttributes: Seq[AttributeReference] = rowNumber :: Nil
override val initialValues: Seq[Expression] = zero :: Nil
override val updateExpressions: Seq[Expression] = rowNumber + one :: Nil
}
/**
* A [[SizeBasedWindowFunction]] needs the size of the current window for its calculation.
*/
trait SizeBasedWindowFunction extends AggregateWindowFunction {
// It's made a val so that the attribute created on driver side is serialized to executor side.
// Otherwise, if it's defined as a function, when it's called on executor side, it actually
// returns the singleton value instantiated on executor side, which has different expression ID
// from the one created on driver side.
val n: AttributeReference = SizeBasedWindowFunction.n
}
object SizeBasedWindowFunction {
val n = AttributeReference("window__partition__size", IntegerType, nullable = false)()
}
/**
 * The RowNumber function assigns a unique, sequential number to each row, starting with one,
* according to the ordering of rows within the window partition.
*
* This documentation has been based upon similar documentation for the Hive and Presto projects.
*/
@ExpressionDescription(
usage = """
_FUNC_() - Assigns a unique, sequential number to each row, starting with one,
according to the ordering of rows within the window partition.
""")
case class RowNumber() extends RowNumberLike {
override val evaluateExpression = rowNumber
override def prettyName: String = "row_number"
}
/**
* The CumeDist function computes the position of a value relative to all values in the partition.
* The result is the number of rows preceding or equal to the current row in the ordering of the
* partition divided by the total number of rows in the window partition. Any tie values in the
* ordering will evaluate to the same position.
*
* This documentation has been based upon similar documentation for the Hive and Presto projects.
*/
@ExpressionDescription(
usage = """
_FUNC_() - Computes the position of a value relative to all values in the partition.
""")
case class CumeDist() extends RowNumberLike with SizeBasedWindowFunction {
override def dataType: DataType = DoubleType
// The frame for CUME_DIST is Range based instead of Row based, because CUME_DIST must
// return the same value for equal values in the partition.
override val frame = SpecifiedWindowFrame(RangeFrame, UnboundedPreceding, CurrentRow)
override val evaluateExpression = rowNumber.cast(DoubleType) / n.cast(DoubleType)
override def prettyName: String = "cume_dist"
}
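// Worked example (illustrative, not from the original source): for a partition ordered by the
// values [10, 20, 20, 30], cume_dist yields [1/4, 3/4, 3/4, 4/4] because the tied values share
// the same position.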
/**
* The NTile function divides the rows for each window partition into `n` buckets ranging from 1 to
* at most `n`. Bucket values will differ by at most 1. If the number of rows in the partition does
* not divide evenly into the number of buckets, then the remainder values are distributed one per
* bucket, starting with the first bucket.
*
* The NTile function is particularly useful for the calculation of tertiles, quartiles, deciles and
 * other common summary statistics.
*
* The function calculates two variables during initialization: The size of a regular bucket, and
* the number of buckets that will have one extra row added to it (when the rows do not evenly fit
* into the number of buckets); both variables are based on the size of the current partition.
* During the calculation process the function keeps track of the current row number, the current
* bucket number, and the row number at which the bucket will change (bucketThreshold). When the
 * current row number reaches the bucket threshold, the bucket value is increased by one and the
* threshold is increased by the bucket size (plus one extra if the current bucket is padded).
*
* This documentation has been based upon similar documentation for the Hive and Presto projects.
*
* @param buckets number of buckets to divide the rows in. Default value is 1.
*/
@ExpressionDescription(
usage = """
_FUNC_(n) - Divides the rows for each window partition into `n` buckets ranging
from 1 to at most `n`.
""")
case class NTile(buckets: Expression) extends RowNumberLike with SizeBasedWindowFunction {
def this() = this(Literal(1))
override def children: Seq[Expression] = Seq(buckets)
  // Validate buckets. Note that this could be relaxed; the bucket value only needs to be constant
  // for each partition.
override def checkInputDataTypes(): TypeCheckResult = {
if (!buckets.foldable) {
return TypeCheckFailure(s"Buckets expression must be foldable, but got $buckets")
}
if (buckets.dataType != IntegerType) {
return TypeCheckFailure(s"Buckets expression must be integer type, but got $buckets")
}
val i = buckets.eval().asInstanceOf[Int]
if (i > 0) {
TypeCheckSuccess
} else {
TypeCheckFailure(s"Buckets expression must be positive, but got: $i")
}
}
private val bucket = AttributeReference("bucket", IntegerType, nullable = false)()
private val bucketThreshold =
AttributeReference("bucketThreshold", IntegerType, nullable = false)()
private val bucketSize = AttributeReference("bucketSize", IntegerType, nullable = false)()
private val bucketsWithPadding =
AttributeReference("bucketsWithPadding", IntegerType, nullable = false)()
private def bucketOverflow(e: Expression) = If(rowNumber >= bucketThreshold, e, zero)
override val aggBufferAttributes = Seq(
rowNumber,
bucket,
bucketThreshold,
bucketSize,
bucketsWithPadding
)
override val initialValues = Seq(
zero,
zero,
zero,
(n / buckets).cast(IntegerType),
(n % buckets).cast(IntegerType)
)
override val updateExpressions = Seq(
rowNumber + one,
bucket + bucketOverflow(one),
bucketThreshold + bucketOverflow(bucketSize + If(bucket < bucketsWithPadding, one, zero)),
NoOp,
NoOp
)
override val evaluateExpression = bucket
}
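// Worked example (illustrative, not from the original source): ntile(3) over a 10-row partition
// gives bucketSize = 10 / 3 = 3 and bucketsWithPadding = 10 % 3 = 1, so the first bucket gets one
// extra row and the buckets contain 4, 3 and 3 rows: rows 1-4 -> 1, rows 5-7 -> 2, rows 8-10 -> 3.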
/**
* A RankLike function is a WindowFunction that changes its value based on a change in the value of
 * the order of the window in which it is processed. For instance, when the value of `input` changes
* in a window ordered by `input` the rank function also changes. The size of the change of the
* rank function is (typically) not dependent on the size of the change in `input`.
*
* This documentation has been based upon similar documentation for the Hive and Presto projects.
*/
abstract class RankLike extends AggregateWindowFunction {
/** Store the values of the window 'order' expressions. */
protected val orderAttrs = children.map { expr =>
AttributeReference(expr.sql, expr.dataType)()
}
/** Predicate that detects if the order attributes have changed. */
protected val orderEquals = children.zip(orderAttrs)
.map(EqualNullSafe.tupled)
.reduceOption(And)
.getOrElse(Literal(true))
protected val orderInit = children.map(e => Literal.create(null, e.dataType))
protected val rank = AttributeReference("rank", IntegerType, nullable = false)()
protected val rowNumber = AttributeReference("rowNumber", IntegerType, nullable = false)()
protected val zero = Literal(0)
protected val one = Literal(1)
protected val increaseRowNumber = rowNumber + one
/**
* Different RankLike implementations use different source expressions to update their rank value.
* Rank for instance uses the number of rows seen, whereas DenseRank uses the number of changes.
*/
protected def rankSource: Expression = rowNumber
  /** Increase the rank when the current rank == 0 or when one of the order attributes changes. */
protected val increaseRank = If(orderEquals && rank =!= zero, rank, rankSource)
override val aggBufferAttributes: Seq[AttributeReference] = rank +: rowNumber +: orderAttrs
override val initialValues = zero +: one +: orderInit
override val updateExpressions = increaseRank +: increaseRowNumber +: children
override val evaluateExpression: Expression = rank
override def sql: String = s"${prettyName.toUpperCase(Locale.ROOT)}()"
def withOrder(order: Seq[Expression]): RankLike
}
/**
* The Rank function computes the rank of a value in a group of values. The result is one plus the
* number of rows preceding or equal to the current row in the ordering of the partition. The values
* will produce gaps in the sequence.
*
* This documentation has been based upon similar documentation for the Hive and Presto projects.
*
 * @param children to base the rank on; a change in the value of one of the children will trigger a
* change in rank. This is an internal parameter and will be assigned by the
* Analyser.
*/
@ExpressionDescription(
usage = """
_FUNC_() - Computes the rank of a value in a group of values. The result is one plus the number
of rows preceding or equal to the current row in the ordering of the partition. The values
will produce gaps in the sequence.
""")
case class Rank(children: Seq[Expression]) extends RankLike {
def this() = this(Nil)
override def withOrder(order: Seq[Expression]): Rank = Rank(order)
}
/**
* The DenseRank function computes the rank of a value in a group of values. The result is one plus
* the previously assigned rank value. Unlike [[Rank]], [[DenseRank]] will not produce gaps in the
* ranking sequence.
*
* This documentation has been based upon similar documentation for the Hive and Presto projects.
*
 * @param children to base the rank on; a change in the value of one of the children will trigger a
* change in rank. This is an internal parameter and will be assigned by the
* Analyser.
*/
@ExpressionDescription(
usage = """
_FUNC_() - Computes the rank of a value in a group of values. The result is one plus the
previously assigned rank value. Unlike the function rank, dense_rank will not produce gaps
in the ranking sequence.
""")
case class DenseRank(children: Seq[Expression]) extends RankLike {
def this() = this(Nil)
override def withOrder(order: Seq[Expression]): DenseRank = DenseRank(order)
override protected def rankSource = rank + one
override val updateExpressions = increaseRank +: children
override val aggBufferAttributes = rank +: orderAttrs
override val initialValues = zero +: orderInit
override def prettyName: String = "dense_rank"
}
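// Illustrative comparison (not from the original source): ordering by the values [10, 20, 20, 30],
// rank() yields [1, 2, 2, 4] while dense_rank() yields [1, 2, 2, 3].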
/**
 * The PercentRank function computes the percentage ranking of a value in a group of values. The
 * result is the rank of the current row minus one, divided by the total number of rows in the
 * partition minus one: (r - 1) / (n - 1). If a partition only contains one row, the function
 * will return 0.
*
* The PercentRank function is similar to the CumeDist function, but it uses rank values instead of
 * row counts in its numerator.
*
* This documentation has been based upon similar documentation for the Hive and Presto projects.
*
* @param children to base the rank on; a change in the value of one of the children will trigger a
* change in rank. This is an internal parameter and will be assigned by the
* Analyser.
*/
@ExpressionDescription(
usage = """
_FUNC_() - Computes the percentage ranking of a value in a group of values.
""")
case class PercentRank(children: Seq[Expression]) extends RankLike with SizeBasedWindowFunction {
def this() = this(Nil)
override def withOrder(order: Seq[Expression]): PercentRank = PercentRank(order)
override def dataType: DataType = DoubleType
override val evaluateExpression =
If(n > one, (rank - one).cast(DoubleType) / (n - one).cast(DoubleType), 0.0d)
override def prettyName: String = "percent_rank"
}
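// Worked example (illustrative, not from the original source): for ranks [1, 2, 2, 4] in a 4-row
// partition, percent_rank yields [(1-1)/3, (2-1)/3, (2-1)/3, (4-1)/3] = [0.0, 0.33..., 0.33..., 1.0].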
|
ahnqirage/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
|
Scala
|
apache-2.0
| 30,501 |
/*
* Copyright (c) 2016 SnappyData, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package org.apache.spark.examples.snappydata
import java.io.File
import scala.util.Try
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{SnappyContext, SnappyJobInvalid, SnappyJobValid, SnappyJobValidation, SnappySQLJob, SnappySession, SparkSession}
/**
* This is a sample code snippet to work with JSON files and SnappyStore column tables.
* Run with
* <pre>
* bin/run-example snappydata.WorkingWithJson quickstart/src/main/resources
* </pre>
 * You can also run this example by submitting it as a job.
* <pre>
* cd $SNAPPY_HOME
* bin/snappy-job.sh submit
* --app-name JsonApp
* --class org.apache.spark.examples.snappydata.WorkingWithJson
* --app-jar examples/jars/quickstart.jar
* --lead [leadHost:port]
* --conf json_resource_folder=../../quickstart/src/main/resources
*
* Check the status of your job id
 * bin/snappy-job.sh status --lead [leadHost:port] --job-id [job-id]
 * </pre>
 */
object WorkingWithJson extends SnappySQLJob {
private val NPARAMS = 1
private var jsonFolder: String = ""
  override def isValidJob(sc: SnappySession, config: Config): SnappyJobValidation = {
    Try(config.getString("json_resource_folder"))
        .map(_ => SnappyJobValid())
        .getOrElse(SnappyJobInvalid("No json_resource_folder config param"))
  }
override def runSnappyJob(snSession: SnappySession, jobConfig: Config): Any = {
val some_people_path = s"${jobConfig.getString("json_resource_folder")}/some_people.json"
// Read a JSON file using Spark API
val people = snSession.read.json(some_people_path)
people.printSchema()
//Drop the table if it exists.
snSession.dropTable("people", ifExists = true)
//Create a columnar table with the Json DataFrame schema
snSession.createTable(tableName = "people",
provider = "column",
schema = people.schema,
options = Map.empty[String,String],
allowExisting = false)
// Write the created DataFrame to the columnar table.
people.write.insertInto("people")
// Append more people to the column table
val more_people_path = s"${jobConfig.getString("json_resource_folder")}/more_people.json"
//Explicitly passing schema to handle record level field mismatch
// e.g. some records have "district" field while some do not.
val morePeople = snSession.read.schema(people.schema).json(more_people_path)
morePeople.write.insertInto("people")
//print schema of the table
println("Print Schema of the table\\n################")
println(snSession.table("people").schema)
println
// Query it like any other table
val nameAndAddress = snSession.sql("SELECT " +
"name, " +
"address.city, " +
"address.state, " +
"address.district, " +
"address.lane " +
"FROM people")
val allPersons = nameAndAddress.toJSON
allPersons.show(truncate = false)
}
def main(args: Array[String]) {
parseArgs(args)
// reducing the log level to minimize the messages on console
Logger.getLogger("org").setLevel(Level.ERROR)
Logger.getLogger("akka").setLevel(Level.ERROR)
val dataDirAbsolutePath: String = createAndGetDataDir
val spark: SparkSession = SparkSession
.builder
.appName("WorkingWithJson")
.master("local[*]")
// sys-disk-dir attribute specifies the directory where persistent data is saved
.config("snappydata.store.sys-disk-dir", dataDirAbsolutePath)
.config("snappydata.store.log-file", dataDirAbsolutePath + "/SnappyDataExample.log")
.getOrCreate
val snSession = new SnappySession(spark.sparkContext)
val config = ConfigFactory.parseString(s"json_resource_folder=$jsonFolder")
runSnappyJob(snSession, config)
spark.stop()
}
def createAndGetDataDir: String = {
// creating a directory to save all persistent data
val dataDir = "./" + "snappydata_examples_data"
new File(dataDir).mkdir()
val dataDirAbsolutePath = new File(dataDir).getAbsolutePath
dataDirAbsolutePath
}
private def parseArgs(args: Array[String]): Unit = {
if (args.length != NPARAMS) {
printUsage()
System.exit(1)
}
jsonFolder = args(0)
}
private def printUsage(): Unit = {
val usage: String =
"Usage: WorkingWithJson <jsonFolderPath> \\n" +
"\\n" +
"jsonFolderPath - (string) local folder where some_people.json & more_people.json are located\\n"
println(usage)
}
}
|
vjr/snappydata
|
examples/src/main/scala/org/apache/spark/examples/snappydata/WorkingWithJson.scala
|
Scala
|
apache-2.0
| 5,193 |
package calculator
sealed abstract class Expr
final case class Literal(v: Double) extends Expr
final case class Ref(name: String) extends Expr
final case class Plus(a: Expr, b: Expr) extends Expr
final case class Minus(a: Expr, b: Expr) extends Expr
final case class Times(a: Expr, b: Expr) extends Expr
final case class Divide(a: Expr, b: Expr) extends Expr
object Calculator {
def computeValues(namedExpressions: Map[String, Signal[Expr]]): Map[String, Signal[Double]] = {
for {
entry <- namedExpressions
} yield (entry._1, Var(eval(entry._2(), namedExpressions)))
}
def eval(expr: Expr, references: Map[String, Signal[Expr]]): Double = {
def innerEval(expr: Expr): Double = {
expr match {
case Literal(v) => v
case Plus(a, b) => innerEval(a) + innerEval(b)
case Minus(a, b) => innerEval(a) - innerEval(b)
case Times(a, b) => innerEval(a) * innerEval(b)
case Divide(a, b) => innerEval(a) / innerEval(b)
case Ref(name) => innerEval(getReferenceExpr(name, references))
}
}
innerEval(expr)
}
  /** Get the Expr for a referenced variable.
* If the variable is not known, returns a literal NaN.
*/
private def getReferenceExpr(name: String, references: Map[String, Signal[Expr]]) = {
references.get(name).fold[Expr] {
Literal(Double.NaN)
} { exprSignal =>
exprSignal()
}
}
}
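// Illustrative usage sketch (assumes the course's Signal implementation is on the classpath; the
// reference map below is hypothetical):
//
//   val refs: Map[String, Signal[Expr]] =
//     Map("a" -> Signal(Literal(2)), "b" -> Signal(Plus(Ref("a"), Literal(3))))
//   Calculator.eval(Ref("b"), refs) // 5.0
//   Calculator.eval(Ref("missing"), refs) // Double.NaN, since unknown references evaluate to NaN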
|
alicanalbayrak/ScalaExperiments
|
calculator/src/main/scala/calculator/Calculator.scala
|
Scala
|
mit
| 1,418 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package forms
import java.time.LocalDate
import java.time.format.{DateTimeFormatter, ResolverStyle}
import models.{DateModel, MonthYearModel}
import org.apache.commons.lang3.StringUtils
import org.apache.commons.lang3.StringUtils.isNotBlank
import play.api.data.format.Formatter
import play.api.data.validation.{Constraint, _}
import play.api.data.{FieldMapping, FormError, Forms, Mapping}
import play.api.libs.json.Reads.email
import play.api.libs.json.{JsString, JsSuccess}
import scala.util.matching.Regex
import scala.util.{Failure, Success, Try}
// scalastyle:off
object FormValidation {
type ErrorCode = String
val trimmedText: Mapping[String] = Forms.text.transform[String](_.trim, identity)
def regexPattern(pattern: Regex, mandatory: Boolean = true)(implicit e: ErrorCode): Constraint[String] = Constraint {
input: String =>
mandatoryText.apply(input) match {
case Valid => Constraints.pattern(pattern, error = s"validation.$e.invalid")(input)
case err => if (mandatory) err else Valid
}
}
def matchesRegex(pattern: Regex, errKey: String): Constraint[String] = Constraint {
input: String => Constraints.pattern(pattern, error = errKey)(input)
}
def mandatory(errKey: String): Constraint[String] = mandatoryGen[String](errKey)
def mandatoryLong(errKey: String): Constraint[Long] = mandatoryGen[Long](errKey)
private def mandatoryGen[T](errKey: String): Constraint[T] = Constraint { input: T =>
if (StringUtils.isNotBlank(input.toString)) Valid else Invalid(errKey)
}
def mandatoryTuple3(errKey: String): Constraint[(String, String, String)] = Constraint { input: (String, String, String) =>
input match {
case (_1, _2, _3) if isNotBlank(_1) & isNotBlank(_2) & isNotBlank(_3) => Valid
case _ => Invalid(errKey)
}
}
def IsEmail(implicit e: ErrorCode): Constraint[String] = Constraint { input: String =>
JsString(input).validateOpt[String](email) match {
case JsSuccess(Some(_), _) => Valid
case _ => Invalid(s"validation.$e.invalid")
}
}
def mandatoryText()(implicit e: ErrorCode): Constraint[String] = Constraint { input: String =>
if (StringUtils.isNotBlank(input)) Valid else Invalid(s"validation.$e.missing")
}
def maxLenText(maxlen: Integer)(implicit e: ErrorCode): Constraint[String] = Constraint { input: String =>
if (StringUtils.length(input) > maxlen) Invalid(s"validation.$e.maxlen") else Valid
}
def mandatoryNumericText()(implicit e: ErrorCode): Constraint[String] = Constraint {
val NumericText = """[0-9]+""".r
(input: String) =>
input match {
case NumericText(_*) => Valid
case _ if StringUtils.isBlank(input) => Invalid(s"validation.$e.missing")
case _ => Invalid("validation.numeric")
}
}
def mandatoryFullNumericText()(implicit e: ErrorCode): Constraint[String] = Constraint {
val NumericText = """[-.,0-9]+""".r
(input: String) =>
input match {
case NumericText(_*) => Valid
case _ if StringUtils.isBlank(input) => Invalid(s"validation.$e.missing")
case _ => Invalid("validation.numeric")
}
}
private def unconstrained[T] = Constraint[T] { (t: T) => Valid }
def inRange[T](minValue: T, maxValue: T)(implicit ordering: Ordering[T], e: ErrorCode): Constraint[T]
= inRangeWithArgs[T](minValue, maxValue)(Seq())(ordering, e)
def inRangeWithArgs[T](minValue: T, maxValue: T)(args: Seq[Any] = Seq())(implicit ordering: Ordering[T], e: ErrorCode): Constraint[T] =
Constraint[T] { (t: T) =>
(ordering.compare(t, minValue).signum, ordering.compare(t, maxValue).signum) match {
case (1, -1) | (0, _) | (_, 0) => Valid
case (_, 1) => Invalid(ValidationError(s"validation.$e.range.above"))
case (-1, _) if !args.isEmpty => Invalid(ValidationError(s"validation.$e.range.below"))
case (-1, _) => Invalid(ValidationError(s"validation.$e.range.below"))
}
}
def onOrAfter[T](minValue: T)(implicit ordering: Ordering[T], e: ErrorCode): Constraint[T] =
Constraint[T] { (t: T) =>
(ordering.compare(t, minValue).signum) match {
case (1) | (0) => Valid
case (-1) => Invalid(ValidationError(s"validation.$e.range.below", minValue))
}
}
val taxEstimateTextToLong = textToLong(0, 1000000000000000L) _
val numberOfWorkersToInt = textToInt(1, 99999) _
def removeSpaces(text: String): String = text.replaceAll(" ", "")
def removeNewlineAndTrim(s: String): String = s.replaceAll("\\r\\n|\\r|\\n|\\t", " ").trim
private def textToInt(min: Int, max: Int)(s: String): Int = {
// assumes input string will be numeric
val bigInt = BigInt(s)
bigInt match {
case _ if bigInt < min => Int.MinValue
case _ if bigInt > max => Int.MaxValue
case _ => bigInt.toInt
}
}
private def textToLong(min: Long, max: Long)(s: String): Long = {
// assumes input string will be numeric
val bigInt = BigInt(s)
bigInt match {
case _ if bigInt < min => Long.MinValue
case _ if bigInt > max => Long.MaxValue
case _ => bigInt.toLong
}
}
def intToText(i: Int): String = i.toString
def longToText(l: Long): String = l.toString
def verifyIsNumeric(errKey: String): Constraint[String] = Constraint {
inputToCheck: String =>
//checking for negatives
val input = if (inputToCheck.startsWith("-")) inputToCheck.drop(1) else inputToCheck
if (input.forall(_.isDigit)) Valid else Invalid(errKey)
}
def boundedLong(tooLowMessage: String, tooHighMessage: String): Constraint[String] = Constraint {
input: String =>
Try(input.toLong) match {
case Success(_) => Valid
case Failure(_: NumberFormatException) => {
if (input.startsWith("-")) Invalid(tooLowMessage) else Invalid(tooHighMessage)
}
}
}
def boundedLong()(implicit e: ErrorCode): Constraint[Long] = Constraint {
input: Long =>
input match {
case Long.MaxValue => Invalid(s"validation.$e.high")
case Long.MinValue => Invalid(s"validation.$e.low")
case _ => Valid
}
}
def boundedInt()(implicit e: ErrorCode): Constraint[Int] = Constraint {
input: Int =>
input match {
case Int.MaxValue => Invalid(s"validation.$e.high")
case Int.MinValue => Invalid(s"validation.$e.low")
case _ => Valid
}
}
def nonEmptyValidText(Pattern: Regex)(implicit e: ErrorCode): Constraint[String] = Constraint[String] {
input: String =>
input match {
case Pattern(_*) => Valid
case s if StringUtils.isNotBlank(s) => Invalid(s"validation.$e.invalid")
case _ => Invalid(s"validation.$e.missing")
}
}
def matches(matchers: List[String], errorMsg: String): Constraint[String] = Constraint[String] {
input: String =>
if (matchers.contains(input)) Valid else Invalid(errorMsg)
}
/* overrides Play's implicit stringFormatter and handles missing options (e.g. no radio button selected) */
private def stringFormat(suffix: String)(args: Seq[Any] = Seq())(implicit e: ErrorCode): Formatter[String] = new Formatter[String] {
def bind(key: String, data: Map[String, String]) = data.get(key).toRight(
Seq(FormError(key, s"validation.$e.$suffix", args))
)
def unbind(key: String, value: String) = Map(key -> value)
}
private def booleanFormat()(args: Seq[Any] = Seq())(implicit e: ErrorCode): Formatter[Boolean] = new Formatter[Boolean] {
def bind(key: String, data: Map[String, String]) = data.get(key).flatMap(input => Try(input.toBoolean).toOption)
.toRight(Seq(FormError(key, s"validation.$e.missing", args)))
def unbind(key: String, value: Boolean) = Map(key -> value.toString)
}
def textMapping()(implicit e: ErrorCode): Mapping[String] = FieldMapping[String]()(stringFormat("missing")())
def textMappingWithMessageArgs()(args: Seq[Any] = Seq())(implicit e: ErrorCode): Mapping[String] = FieldMapping[String]()(stringFormat("missing")(args))
def missingBooleanFieldMappingArgs()(args: Seq[Any] = Seq())(implicit e: ErrorCode): Mapping[Boolean] = FieldMapping[Boolean]()(booleanFormat()(args))
def missingBooleanFieldMapping()(implicit e: ErrorCode): Mapping[Boolean] =
FieldMapping[Boolean]()(booleanFormat()())
def nonEmptyDate(errKey: String): Constraint[(String, String, String)] = Constraint {
input: (String, String, String) =>
(input._1.nonEmpty, input._2.nonEmpty, input._3.nonEmpty) match {
case (true, true, true) => Valid
case _ => Invalid(errKey)
}
}
def isValidPhoneNumber(formName:String): Constraint[String] = Constraint { phone: String =>
val isValidNumber:Option[Int] = if(phone.matches("^[A-Z0-9 )/(*#+-]+$")) Some(phone.length) else None
isValidNumber match {
case Some(num) if(num > 24) => Invalid(s"validation.invalid.$formName.tooLong")
case Some(_) => Valid
case _ => Invalid(s"validation.invalid.$formName")
}
}
private def tupleToDate(dateTuple: (String,String,String)) = {
LocalDate.parse(s"${dateTuple._1}-${dateTuple._2}-${dateTuple._3}", DateTimeFormatter.ofPattern("d-M-uuuu").withResolverStyle(ResolverStyle.STRICT))
}
def validDate(errKey: String): Constraint[(String, String, String)] = Constraint {
input: (String, String, String) =>
val date = Try {
tupleToDate(input)
}.toOption
date match {
case Some(valid) => Valid
case None => Invalid(errKey)
}
}
def withinRange(minDate: LocalDate, maxDate: LocalDate, beforeMinErr: String, afterMaxErr: String, args: List[String]): Constraint[(String, String, String)] = Constraint {
input: (String, String, String) =>
val date = tupleToDate(input)
if (date.isEqual(minDate) || date.isAfter(minDate))
if (date.isEqual(maxDate) || date.isBefore(maxDate)) Valid else Invalid(afterMaxErr, args: _*)
else Invalid(beforeMinErr, args: _*)
}
def withinFourYearsPast(errKey: String): Constraint[(String, String, String)] = Constraint {
input: (String, String, String) =>
val date = tupleToDate(input)
if (date.isAfter(LocalDate.now().minusYears(4).minusDays(1))) Valid else Invalid(errKey)
}
object Dates {
def nonEmptyDateModel(constraint: => Constraint[DateModel] = unconstrained)(implicit e: ErrorCode): Constraint[DateModel] =
Constraint { dm =>
        mandatoryText.apply(Seq(dm.day, dm.month, dm.year).mkString.trim) match {
case Valid => constraint(dm)
case err@_ => err
}
}
def nonEmptyMonthYearModel(constraint: => Constraint[MonthYearModel] = unconstrained)(implicit e: ErrorCode): Constraint[MonthYearModel] =
Constraint { pdm =>
mandatoryText.apply(Seq(pdm.month, pdm.year).mkString.trim) match {
case Valid => constraint(pdm)
case err@_ => err
}
}
def validDateModel(dateConstraint: => Constraint[LocalDate] = unconstrained)(implicit e: ErrorCode): Constraint[DateModel] =
Constraint(dm => dm.toLocalDate.fold[ValidationResult](Invalid(s"validation.$e.invalid"))(dateConstraint(_)))
def validPartialMonthYearModel(dateConstraint: => Constraint[LocalDate] = unconstrained)(implicit e: ErrorCode): Constraint[MonthYearModel] =
Constraint(dm => dm.toLocalDate.fold[ValidationResult](Invalid(s"validation.$e.invalid"))(dateConstraint(_)))
}
}
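// Illustrative usage sketch (not part of the original file; the form field name and error code
// below are hypothetical):
//
//   import play.api.data.Form
//   import play.api.data.Forms.single
//
//   implicit val errorCode: FormValidation.ErrorCode = "companyName"
//   val companyNameForm: Form[String] = Form(
//     single("companyName" -> FormValidation.trimmedText.verifying(FormValidation.mandatoryText()))
//   )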
|
hmrc/vat-registration-frontend
|
app/forms/FormValidation.scala
|
Scala
|
apache-2.0
| 12,047 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.testsuite.junit
import org.junit.Test
// This is a base class that is extended in the tests by MultiCompilationB
abstract class MultiCompilationTest {
@Test def testFromMultiCompilation(): Unit = ()
}
|
scala-js/scala-js
|
test-suite/js/src/main/scala/org/scalajs/testsuite/junit/MultiCompilationTest.scala
|
Scala
|
apache-2.0
| 496 |
package main.scala
import edu.utah.cs.simba.SimbaContext
import edu.utah.cs.simba.index.RTreeType
import org.apache.spark.{SparkConf, SparkContext}
/**
* Created by and on 3/20/17.
*/
object PartitionViewer {
case class PointItem(id: Int, x: Double, y: Double)
var master: String = "local[*]"
var filename: String = "/opt/Datasets/Beijing/P10K.csv"
var logs: String = "ERROR"
def main(args: Array[String]): Unit = {
// master = args(0)
// filename = args(1)
// logs = args(2)
val sparkConf = new SparkConf()
.setAppName("PartitionViewer")
.setMaster(master)
val sc = new SparkContext(sparkConf)
sc.setLogLevel(logs)
val simbaContext = new SimbaContext(sc)
import simbaContext.SimbaImplicits._
import simbaContext.implicits._
val points = sc.textFile(filename,10)
.map(_.split(","))
.map(p => PointItem(p(0).trim.toInt, p(1).trim.toDouble, p(2).trim.toDouble))
.toDF()
println(points.count())
points index(RTreeType, "rt", Array("x", "y"))
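    // Compute the minimum bounding rectangle (MBR) and the record count of each partition.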
val mbrs = points.rdd.mapPartitionsWithIndex{ (index, iterator) =>
var min_x: Double = Double.MaxValue
var min_y: Double = Double.MaxValue
var max_x: Double = Double.MinValue
var max_y: Double = Double.MinValue
var size: Int = 0
iterator.toList.foreach{row =>
val x = row.getDouble(1)
val y = row.getDouble(2)
if(x < min_x){
min_x = x
}
if(y < min_y){
min_y = y
}
if(x > max_x){
max_x = x
}
if(y > max_y){
max_y = y
}
size += 1
}
List((min_x,min_y,max_x,max_y, s"$index", size)).iterator
}
val gson = new GeoGSON("4799")
mbrs.collect().foreach {row =>
gson.makeMBR(row._1,row._2,row._3,row._4,row._5, row._6)
}
gson.saveGeoJSON("RTree_B16M.json")
sc.stop()
}
}
|
aocalderon/PhD
|
Y2Q3/PBFE3/src/main/scala/main/scala/PartitionViewer.scala
|
Scala
|
lgpl-3.0
| 1,914 |
package com.scalableminds.webknossos.datastore.services
import akka.actor.ActorSystem
import com.google.inject.Inject
import com.google.inject.name.Named
import com.scalableminds.webknossos.datastore.models.datasource.inbox.InboxDataSource
import com.scalableminds.webknossos.datastore.models.datasource.{DataSource, DataSourceId}
import com.scalableminds.webknossos.datastore.storage.TemporaryStore
import com.scalableminds.util.tools.{Fox, FoxImplicits}
import com.typesafe.scalalogging.LazyLogging
import scala.concurrent.ExecutionContext.Implicits.global
class DataSourceRepository @Inject()(
remoteWebKnossosClient: DSRemoteWebKnossosClient,
@Named("webknossos-datastore") val system: ActorSystem
) extends TemporaryStore[DataSourceId, InboxDataSource](system)
with LazyLogging
with FoxImplicits {
def findUsable(id: DataSourceId): Option[DataSource] =
find(id).flatMap(_.toUsable)
def updateDataSource(dataSource: InboxDataSource): Fox[Unit] =
for {
_ <- Fox.successful(())
_ = insert(dataSource.id, dataSource)
_ <- remoteWebKnossosClient.reportDataSource(dataSource)
} yield ()
def updateDataSources(dataSources: List[InboxDataSource]): Fox[Unit] =
for {
_ <- Fox.successful(())
_ = removeAll()
_ = dataSources.foreach(dataSource => insert(dataSource.id, dataSource))
_ <- remoteWebKnossosClient.reportDataSources(dataSources)
} yield ()
def cleanUpDataSource(dataSourceId: DataSourceId): Fox[Unit] =
for {
_ <- Fox.successful(remove(dataSourceId))
_ <- remoteWebKnossosClient.deleteDataSource(dataSourceId)
} yield ()
}
|
scalableminds/webknossos
|
webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DataSourceRepository.scala
|
Scala
|
agpl-3.0
| 1,642 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.graphx.util
import org.apache.spark.SparkFunSuite
class BytecodeUtilsSuite extends SparkFunSuite {
import BytecodeUtilsSuite.TestClass
test("closure invokes a method") {
val c1 = {e: TestClass => println(e.foo); println(e.bar); println(e.baz); }
assert(BytecodeUtils.invokedMethod(c1, classOf[TestClass], "foo"))
assert(BytecodeUtils.invokedMethod(c1, classOf[TestClass], "bar"))
assert(BytecodeUtils.invokedMethod(c1, classOf[TestClass], "baz"))
val c2 = {e: TestClass => println(e.foo); println(e.bar); }
assert(BytecodeUtils.invokedMethod(c2, classOf[TestClass], "foo"))
assert(BytecodeUtils.invokedMethod(c2, classOf[TestClass], "bar"))
assert(!BytecodeUtils.invokedMethod(c2, classOf[TestClass], "baz"))
val c3 = {e: TestClass => println(e.foo); }
assert(BytecodeUtils.invokedMethod(c3, classOf[TestClass], "foo"))
assert(!BytecodeUtils.invokedMethod(c3, classOf[TestClass], "bar"))
assert(!BytecodeUtils.invokedMethod(c3, classOf[TestClass], "baz"))
}
test("closure inside a closure invokes a method") {
val c1 = {e: TestClass => println(e.foo); println(e.bar); println(e.baz); }
val c2 = {e: TestClass => c1(e); println(e.foo); }
assert(BytecodeUtils.invokedMethod(c2, classOf[TestClass], "foo"))
assert(BytecodeUtils.invokedMethod(c2, classOf[TestClass], "bar"))
assert(BytecodeUtils.invokedMethod(c2, classOf[TestClass], "baz"))
}
test("closure inside a closure inside a closure invokes a method") {
val c1 = {e: TestClass => println(e.baz); }
val c2 = {e: TestClass => c1(e); println(e.foo); }
val c3 = {e: TestClass => c2(e) }
assert(BytecodeUtils.invokedMethod(c3, classOf[TestClass], "foo"))
assert(!BytecodeUtils.invokedMethod(c3, classOf[TestClass], "bar"))
assert(BytecodeUtils.invokedMethod(c3, classOf[TestClass], "baz"))
}
test("closure calling a function that invokes a method") {
def zoo(e: TestClass) {
println(e.baz)
}
val c1 = {e: TestClass => zoo(e)}
assert(!BytecodeUtils.invokedMethod(c1, classOf[TestClass], "foo"))
assert(!BytecodeUtils.invokedMethod(c1, classOf[TestClass], "bar"))
assert(BytecodeUtils.invokedMethod(c1, classOf[TestClass], "baz"))
}
test("closure calling a function that invokes a method which uses another closure") {
val c2 = {e: TestClass => println(e.baz)}
def zoo(e: TestClass) {
c2(e)
}
val c1 = {e: TestClass => zoo(e)}
assert(!BytecodeUtils.invokedMethod(c1, classOf[TestClass], "foo"))
assert(!BytecodeUtils.invokedMethod(c1, classOf[TestClass], "bar"))
assert(BytecodeUtils.invokedMethod(c1, classOf[TestClass], "baz"))
}
test("nested closure") {
val c2 = {e: TestClass => println(e.baz)}
def zoo(e: TestClass, c: TestClass => Unit) {
c(e)
}
val c1 = {e: TestClass => zoo(e, c2)}
assert(!BytecodeUtils.invokedMethod(c1, classOf[TestClass], "foo"))
assert(!BytecodeUtils.invokedMethod(c1, classOf[TestClass], "bar"))
assert(BytecodeUtils.invokedMethod(c1, classOf[TestClass], "baz"))
}
// The following doesn't work yet, because the byte code doesn't contain any information
// about what exactly "c" is.
// test("invoke interface") {
// val c1 = {e: TestClass => c(e)}
// assert(!BytecodeUtils.invokedMethod(c1, classOf[TestClass], "foo"))
// assert(!BytecodeUtils.invokedMethod(c1, classOf[TestClass], "bar"))
// assert(BytecodeUtils.invokedMethod(c1, classOf[TestClass], "baz"))
// }
private val c = {e: TestClass => println(e.baz)}
}
object BytecodeUtilsSuite {
class TestClass(val foo: Int, val bar: Long) {
def baz: Boolean = false
}
}
|
andrewor14/iolap
|
graphx/src/test/scala/org/apache/spark/graphx/util/BytecodeUtilsSuite.scala
|
Scala
|
apache-2.0
| 4,486 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.ops
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.Table
import scala.reflect.ClassTag
class FloorMod[T: ClassTag, D: ClassTag]()(implicit ev: TensorNumeric[T], ev2: TensorNumeric[D])
extends Operation[Table, Tensor[D], T]{
output = Tensor[D]()
private val buffer = Tensor[D]()
override def updateOutput(input: Table): Tensor[D] = {
val input1 = input[Tensor[D]](1)
val input2 = input[Tensor[D]](2)
output.resizeAs(input1).copy(input1)
buffer.resizeAs(output).copy(output)
buffer.map(input2, (a, b) => ev2.floorDiv(a, b)).cmul(input2)
output.sub(buffer)
}
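  // Worked example (illustrative, not from the original source): for a = -7 and b = 3,
  // floorDiv(-7, 3) = -3, so the result is -7 - (-3) * 3 = 2; the remainder takes the sign of the
  // divisor, matching floor-modulus semantics.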
override def clearState(): FloorMod.this.type = {
super.clearState()
buffer.set()
this
}
override def getClassTagNumerics() : (Array[ClassTag[_]], Array[TensorNumeric[_]]) = {
(Array[ClassTag[_]](scala.reflect.classTag[T], scala.reflect.classTag[D]),
Array[TensorNumeric[_]](ev, ev2))
}
}
object FloorMod {
def apply[T: ClassTag, D: ClassTag]()(implicit ev: TensorNumeric[T], ev2: TensorNumeric[D])
: FloorMod[T, D] = new FloorMod()
}
|
yiheng/BigDL
|
spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/ops/FloorMod.scala
|
Scala
|
apache-2.0
| 1,817 |
/**
* This file is part of mycollab-web.
*
* mycollab-web is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* mycollab-web is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with mycollab-web. If not, see <http://www.gnu.org/licenses/>.
*/
package com.esofthead.mycollab.module.crm.view.lead
import com.esofthead.mycollab.common.UrlTokenizer
import com.esofthead.mycollab.eventmanager.EventBusFactory
import com.esofthead.mycollab.module.crm.domain.Lead
import com.esofthead.mycollab.module.crm.events.LeadEvent
import com.esofthead.mycollab.module.crm.view.CrmUrlResolver
/**
* @author MyCollab Ltd
* @since 5.0.9
*/
class LeadUrlResolver extends CrmUrlResolver {
this.addSubResolver("list", new ListUrlResolver)
this.addSubResolver("preview", new PreviewUrlResolver)
this.addSubResolver("add", new AddUrlResolver)
this.addSubResolver("edit", new EditUrlResolver)
class ListUrlResolver extends CrmUrlResolver {
protected override def handlePage(params: String*) {
EventBusFactory.getInstance.post(new LeadEvent.GotoList(this, null))
}
}
class AddUrlResolver extends CrmUrlResolver {
protected override def handlePage(params: String*) {
EventBusFactory.getInstance.post(new LeadEvent.GotoAdd(this, new Lead))
}
}
class EditUrlResolver extends CrmUrlResolver {
protected override def handlePage(params: String*) {
val leadId: Integer = new UrlTokenizer(params(0)).getInt
EventBusFactory.getInstance.post(new LeadEvent.GotoEdit(this, leadId))
}
}
class PreviewUrlResolver extends CrmUrlResolver {
protected override def handlePage(params: String*) {
val leadId: Integer = new UrlTokenizer(params(0)).getInt
EventBusFactory.getInstance.post(new LeadEvent.GotoRead(this, leadId))
}
}
}
|
uniteddiversity/mycollab
|
mycollab-web/src/main/scala/com/esofthead/mycollab/module/crm/view/lead/LeadUrlResolver.scala
|
Scala
|
agpl-3.0
| 2,351 |
package com.caffinc.hydrangea.core.transformer
import com.caffinc.hydrangea.core.serde.KafkaRecord
/**
 * Stores the KafkaRecord into persistent storage (MongoDB)
*
* @author Sriram
*/
object StorageTransformer extends Transformer[KafkaRecord, String] {
def apply(implicit record: KafkaRecord): String = transform
override def transform(implicit record: KafkaRecord): String = {
// TODO: Store into MongoDB
record.key
}
}
|
caffinc/hydrangea
|
hydrangea/core/src/main/scala/com/caffinc/hydrangea/core/transformer/StorageTransformer.scala
|
Scala
|
mit
| 447 |
/**
* @author Yuuto
*/
package yuuto.enhancedinventories.gui
import net.minecraft.entity.player.EntityPlayer;
import net.minecraft.inventory.Container;
import net.minecraft.inventory.IInventory;
import net.minecraft.inventory.InventoryCraftResult;
import net.minecraft.inventory.InventoryCrafting;
/**
* A Dummy crafting container for use with static inventories
* @author Yuuto
*
*/
class ContainerCraftingDummy(val craftingTable:ICraftingTable) extends Container{
val craftingMatrix:InventoryCrafting = new InventoryCrafting(this, 3, 3);
val craftResult:InventoryCraftResult = new InventoryCraftResult();
override def onCraftMatrixChanged(inv:IInventory){
if(craftingTable != null)
craftingTable.onCraftMatrixChanged(inv);
}
override def canInteractWith(player:EntityPlayer):Boolean=false;
}
|
AnimeniacYuuto/EnhancedInventories
|
src/main/scala/yuuto/enhancedinventories/gui/ContainerCraftingDummy.scala
|
Scala
|
gpl-2.0
| 828 |
import java.nio.file.Paths
def test1 = {
Paths.get("")
Paths.get("", null)
Paths.get("", "")
Paths.get("", "", null)
val x1: String = ???
val x2: String | Null = ???
Paths.get("", x1)
Paths.get("", x2)
}
def test2 = {
val xs1: Seq[String] = ???
val xs2: Seq[String | Null] = ???
val xs3: Seq[String | Null] | Null = ???
val xs4: Seq[String] | Null = ???
val ys1: Array[String] = ???
val ys2: Array[String | Null] = ???
val ys3: Array[String | Null] | Null = ???
val ys4: Array[String] | Null = ???
Paths.get("", xs1: _*)
Paths.get("", xs2: _*)
Paths.get("", xs3: _*) // error
Paths.get("", xs4: _*) // error
Paths.get("", ys1: _*)
Paths.get("", ys2: _*)
Paths.get("", ys3: _*) // error
Paths.get("", ys4: _*) // error
Paths.get("", null: _*) // error
}
|
lampepfl/dotty
|
tests/explicit-nulls/unsafe-common/unsafe-java-varargs.scala
|
Scala
|
apache-2.0
| 810 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.api.scala
import org.apache.flink.annotation.PublicEvolving
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.streaming.api.datastream.{AsyncDataStream => JavaAsyncDataStream}
import org.apache.flink.streaming.api.functions.async.{ResultFuture => JavaResultFuture}
import org.apache.flink.streaming.api.functions.async.{AsyncFunction => JavaAsyncFunction}
import org.apache.flink.streaming.api.scala.async.{AsyncFunction, JavaResultFutureWrapper, ResultFuture}
import org.apache.flink.util.Preconditions
import scala.concurrent.duration.TimeUnit
/**
* A helper class to apply [[AsyncFunction]] to a data stream.
*
* Example:
* {{{
* val input: DataStream[String] = ...
* val asyncFunction: (String, ResultFuture[String]) => Unit = ...
*
* AsyncDataStream.orderedWait(input, asyncFunction, timeout, TimeUnit.MILLISECONDS, 100)
* }}}
*/
@PublicEvolving
object AsyncDataStream {
private val DEFAULT_QUEUE_CAPACITY = 100
/**
* Apply an asynchronous function on the input data stream. The output order is only maintained
* with respect to watermarks. Stream records which lie between the same two watermarks, can be
* re-ordered.
*
* @param input to apply the async function on
* @param asyncFunction to use
* @param timeout for the asynchronous operation to complete
* @param timeUnit of the timeout
* @param capacity of the operator which is equivalent to the number of concurrent asynchronous
* operations
* @tparam IN Type of the input record
* @tparam OUT Type of the output record
* @return the resulting stream containing the asynchronous results
*/
def unorderedWait[IN, OUT: TypeInformation](
input: DataStream[IN],
asyncFunction: AsyncFunction[IN, OUT],
timeout: Long,
timeUnit: TimeUnit,
capacity: Int)
: DataStream[OUT] = {
val javaAsyncFunction = new JavaAsyncFunction[IN, OUT] {
override def asyncInvoke(input: IN, resultFuture: JavaResultFuture[OUT]): Unit = {
asyncFunction.asyncInvoke(input, new JavaResultFutureWrapper(resultFuture))
}
override def timeout(input: IN, resultFuture: JavaResultFuture[OUT]): Unit = {
asyncFunction.timeout(input, new JavaResultFutureWrapper(resultFuture))
}
}
val outType : TypeInformation[OUT] = implicitly[TypeInformation[OUT]]
asScalaStream(JavaAsyncDataStream.unorderedWait[IN, OUT](
input.javaStream,
javaAsyncFunction,
timeout,
timeUnit,
capacity).returns(outType))
}
/**
* Apply an asynchronous function on the input data stream. The output order is only maintained
* with respect to watermarks. Stream records which lie between the same two watermarks, can be
* re-ordered.
*
* @param input to apply the async function on
* @param asyncFunction to use
* @param timeout for the asynchronous operation to complete
* @param timeUnit of the timeout
* @tparam IN Type of the input record
* @tparam OUT Type of the output record
* @return the resulting stream containing the asynchronous results
*/
def unorderedWait[IN, OUT: TypeInformation](
input: DataStream[IN],
asyncFunction: AsyncFunction[IN, OUT],
timeout: Long,
timeUnit: TimeUnit)
: DataStream[OUT] = {
unorderedWait(input, asyncFunction, timeout, timeUnit, DEFAULT_QUEUE_CAPACITY)
}
/**
* Apply an asynchronous function on the input data stream. The output order is only maintained
* with respect to watermarks. Stream records which lie between the same two watermarks, can be
* re-ordered.
*
* @param input to apply the async function on
* @param timeout for the asynchronous operation to complete
* @param timeUnit of the timeout
* @param capacity of the operator which is equivalent to the number of concurrent asynchronous
* operations
* @param asyncFunction to use
* @tparam IN Type of the input record
* @tparam OUT Type of the output record
* @return the resulting stream containing the asynchronous results
*/
def unorderedWait[IN, OUT: TypeInformation](
input: DataStream[IN],
timeout: Long,
timeUnit: TimeUnit,
capacity: Int) (
asyncFunction: (IN, ResultFuture[OUT]) => Unit)
: DataStream[OUT] = {
Preconditions.checkNotNull(asyncFunction)
val cleanAsyncFunction = input.executionEnvironment.scalaClean(asyncFunction)
val func = new JavaAsyncFunction[IN, OUT] {
override def asyncInvoke(input: IN, resultFuture: JavaResultFuture[OUT]): Unit = {
cleanAsyncFunction(input, new JavaResultFutureWrapper[OUT](resultFuture))
}
}
val outType : TypeInformation[OUT] = implicitly[TypeInformation[OUT]]
asScalaStream(JavaAsyncDataStream.unorderedWait[IN, OUT](
input.javaStream,
func,
timeout,
timeUnit,
capacity).returns(outType))
}
/**
* Apply an asynchronous function on the input data stream. The output order is only maintained
* with respect to watermarks. Stream records which lie between the same two watermarks, can be
* re-ordered.
*
* @param input to apply the async function on
* @param timeout for the asynchronous operation to complete
* @param timeUnit of the timeout
* @param asyncFunction to use
* @tparam IN Type of the input record
* @tparam OUT Type of the output record
* @return the resulting stream containing the asynchronous results
*/
def unorderedWait[IN, OUT: TypeInformation](
input: DataStream[IN],
timeout: Long,
timeUnit: TimeUnit) (
asyncFunction: (IN, ResultFuture[OUT]) => Unit)
: DataStream[OUT] = {
unorderedWait(input, timeout, timeUnit, DEFAULT_QUEUE_CAPACITY)(asyncFunction)
}
/**
* Apply an asynchronous function on the input data stream. The output order is the same as the
* input order of the elements.
*
* @param input to apply the async function on
* @param asyncFunction to use
* @param timeout for the asynchronous operation to complete
* @param timeUnit of the timeout
* @param capacity of the operator which is equivalent to the number of concurrent asynchronous
* operations
* @tparam IN Type of the input record
* @tparam OUT Type of the output record
* @return the resulting stream containing the asynchronous results
*/
def orderedWait[IN, OUT: TypeInformation](
input: DataStream[IN],
asyncFunction: AsyncFunction[IN, OUT],
timeout: Long,
timeUnit: TimeUnit,
capacity: Int)
: DataStream[OUT] = {
val javaAsyncFunction = new JavaAsyncFunction[IN, OUT] {
override def asyncInvoke(input: IN, resultFuture: JavaResultFuture[OUT]): Unit = {
asyncFunction.asyncInvoke(input, new JavaResultFutureWrapper[OUT](resultFuture))
}
override def timeout(input: IN, resultFuture: JavaResultFuture[OUT]): Unit = {
asyncFunction.timeout(input, new JavaResultFutureWrapper[OUT](resultFuture))
}
}
val outType : TypeInformation[OUT] = implicitly[TypeInformation[OUT]]
asScalaStream(JavaAsyncDataStream.orderedWait[IN, OUT](
input.javaStream,
javaAsyncFunction,
timeout,
timeUnit,
capacity).returns(outType))
}
/**
* Apply an asynchronous function on the input data stream. The output order is the same as the
* input order of the elements.
*
* @param input to apply the async function on
* @param asyncFunction to use
* @param timeout for the asynchronous operation to complete
* @param timeUnit of the timeout
* @tparam IN Type of the input record
* @tparam OUT Type of the output record
* @return the resulting stream containing the asynchronous results
*/
def orderedWait[IN, OUT: TypeInformation](
input: DataStream[IN],
asyncFunction: AsyncFunction[IN, OUT],
timeout: Long,
timeUnit: TimeUnit)
: DataStream[OUT] = {
orderedWait(input, asyncFunction, timeout, timeUnit, DEFAULT_QUEUE_CAPACITY)
}
/**
* Apply an asynchronous function on the input data stream. The output order is the same as the
* input order of the elements.
*
* @param input to apply the async function on
* @param timeout for the asynchronous operation to complete
* @param timeUnit of the timeout
* @param capacity of the operator which is equivalent to the number of concurrent asynchronous
* operations
* @param asyncFunction to use
* @tparam IN Type of the input record
* @tparam OUT Type of the output record
* @return the resulting stream containing the asynchronous results
*/
def orderedWait[IN, OUT: TypeInformation](
input: DataStream[IN],
timeout: Long,
timeUnit: TimeUnit,
capacity: Int) (
asyncFunction: (IN, ResultFuture[OUT]) => Unit)
: DataStream[OUT] = {
Preconditions.checkNotNull(asyncFunction)
val cleanAsyncFunction = input.executionEnvironment.scalaClean(asyncFunction)
val func = new JavaAsyncFunction[IN, OUT] {
override def asyncInvoke(input: IN, resultFuture: JavaResultFuture[OUT]): Unit = {
cleanAsyncFunction(input, new JavaResultFutureWrapper[OUT](resultFuture))
}
}
val outType : TypeInformation[OUT] = implicitly[TypeInformation[OUT]]
asScalaStream(JavaAsyncDataStream.orderedWait[IN, OUT](
input.javaStream,
func,
timeout,
timeUnit,
capacity).returns(outType))
}
/**
* Apply an asynchronous function on the input data stream. The output order is the same as the
* input order of the elements.
*
* @param input to apply the async function on
* @param timeout for the asynchronous operation to complete
* @param timeUnit of the timeout
* @param asyncFunction to use
* @tparam IN Type of the input record
* @tparam OUT Type of the output record
* @return the resulting stream containing the asynchronous results
*/
def orderedWait[IN, OUT: TypeInformation](
input: DataStream[IN],
timeout: Long,
timeUnit: TimeUnit) (
asyncFunction: (IN, ResultFuture[OUT]) => Unit)
: DataStream[OUT] = {
orderedWait(input, timeout, timeUnit, DEFAULT_QUEUE_CAPACITY)(asyncFunction)
}
}
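// Illustrative usage sketch, not part of the original file: one way the closure variant of
// `unorderedWait` defined above might be applied. The `lookup` function standing in for an
// external asynchronous client is a made-up placeholder; only the `AsyncDataStream.unorderedWait`
// call itself comes from this file.
private object AsyncDataStreamUsageSketch {
  import java.util.concurrent.TimeUnit
  import scala.concurrent.Future
  import scala.concurrent.ExecutionContext.Implicits.global
  import scala.util.{Failure, Success}
  // Placeholder for a non-blocking call to some external service.
  def lookup(key: String): Future[Int] = Future(key.length)
  // Each element triggers an asynchronous lookup; up to 100 lookups may be in flight at once and
  // results are emitted as they complete (unordered), subject to a one second timeout per element.
  def enrich(input: DataStream[String]): DataStream[Int] =
    AsyncDataStream.unorderedWait[String, Int](input, 1000L, TimeUnit.MILLISECONDS, 100) {
      (key, resultFuture) =>
        lookup(key).onComplete {
          case Success(value) => resultFuture.complete(Iterable(value))
          case Failure(error) => resultFuture.completeExceptionally(error)
        }
    }
}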
|
mylog00/flink
|
flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/AsyncDataStream.scala
|
Scala
|
apache-2.0
| 11,195 |
package play.api.cache.redis
import scala.language.higherKinds
/**
 * Redis Hashes are simply hash maps with strings as keys. It is possible to add
 * elements to a Redis Hash by adding new elements into the collection.
*
* <strong>This simplified wrapper implements only unordered Maps.</strong>
*
* @tparam Elem Data type of the inserted element
*/
trait RedisMap[Elem, Result[_]] extends RedisCollection[Map[String, Elem], Result] {
override type This = RedisMap[Elem, Result]
/**
* Insert the value at the given key into the map
*
* @param field key
* @param value inserted value
* @return the map for the chaining calls
*/
def add(field: String, value: Elem): Result[This]
/**
 * Returns the value stored at the given key in the map
*
* @param field key
* @return Some if the value exists in the map, None otherwise
*/
def get(field: String): Result[Option[Elem]]
/**
 * Returns the values stored at the given keys in the map. The collection
 * of results has the same size as the collection of given fields and preserves
 * its ordering.
 *
 * @param fields keys to get
 * @return for each field, Some if the value exists in the map, None otherwise
*/
def getFields(fields: String*): Result[Seq[Option[Elem]]] = getFields(fields)
/**
 * Returns the values stored at the given keys in the map. The collection
 * of results has the same size as the collection of given fields and preserves
 * its ordering.
 *
 * @param fields keys to get
 * @return for each field, Some if the value exists in the map, None otherwise
*/
def getFields(fields: Iterable[String]): Result[Seq[Option[Elem]]]
/**
* <p>Tests if the field is contained in the map. Returns true if exists, otherwise returns false</p>
*
* @note <strong>Time complexity:</strong> O(1)
* @param field tested field
* @return true if exists in the map, otherwise false
*/
def contains(field: String): Result[Boolean]
/**
 * <p>Removes the specified fields from the map stored at key. Non-existing fields are ignored.
 * An error is returned when the key exists and does not hold a map.</p>
 *
 * @note <strong>Time complexity:</strong> O(N) where N is the number of fields to be removed.
 * @param field fields to be removed
* @return the map for chaining calls
*/
def remove(field: String*): Result[This]
/**
* Increment a value at the given key in the map
*
* @param field key
 * @param incrementBy amount to increment by
 * @return the value after the increment
*/
def increment(field: String, incrementBy: Long = 1): Result[Long]
/**
* <p>Returns all elements in the map</p>
*
* @note <strong>Time complexity:</strong> O(N) where N is the map cardinality.
* @return all elements in the map
*/
def toMap: Result[Map[String, Elem]]
/**
* Returns all keys defined in the map
*
* @return all used keys
*/
def keySet: Result[Set[String]]
/**
* Returns all values stored in the map
*
* @return all stored values
*/
def values: Result[Set[Elem]]
}
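// Illustrative usage sketch, not part of the original file: how a `RedisMap` handle might be used
// once obtained from the cache API. The handle is taken as a parameter here rather than
// constructed, and the field name "alice" is arbitrary.
private object RedisMapUsageSketch {
  import scala.concurrent.{ExecutionContext, Future}
  def recordWin(scores: RedisMap[Int, Future])(implicit ec: ExecutionContext): Future[Option[Int]] =
    for {
      _       <- scores.add("alice", 1)       // create or overwrite the field
      _       <- scores.increment("alice", 4) // atomically increment the stored value
      current <- scores.get("alice")          // read it back, None if the field is missing
    } yield current
}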
|
KarelCemus/play-redis
|
src/main/scala/play/api/cache/redis/RedisMap.scala
|
Scala
|
mpl-2.0
| 3,124 |
package org.broadinstitute.dsde.workbench.sam
package api
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.model.{StatusCode, StatusCodes}
import akka.http.scaladsl.testkit.ScalatestRouteTest
import org.broadinstitute.dsde.workbench.model.WorkbenchIdentityJsonSupport._
import org.broadinstitute.dsde.workbench.model._
import org.broadinstitute.dsde.workbench.sam.api.ManagedGroupRoutesSpec._
import org.broadinstitute.dsde.workbench.sam.model.SamJsonSupport._
import org.broadinstitute.dsde.workbench.sam.model._
import org.broadinstitute.dsde.workbench.sam.service.ManagedGroupService
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.BeforeAndAfter
import spray.json.DefaultJsonProtocol._
import scala.language.reflectiveCalls
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
/**
* Created by gpolumbo on 2/26/2017.
*/
class ManagedGroupRoutesV1Spec extends AnyFlatSpec with ScalaFutures with Matchers with ScalatestRouteTest with TestSupport with BeforeAndAfter {
private val accessPolicyNames = Set(ManagedGroupService.adminPolicyName, ManagedGroupService.memberPolicyName, ManagedGroupService.adminNotifierPolicyName)
private val policyActions: Set[ResourceAction] = accessPolicyNames.flatMap(policyName => Set(SamResourceActions.sharePolicy(policyName), SamResourceActions.readPolicy(policyName)))
private val resourceActions = Set(ResourceAction("delete"), ResourceAction("notify_admins"), ResourceAction("set_access_instructions")) union policyActions
private val resourceActionPatterns = resourceActions.map(action => ResourceActionPattern(action.value, "", false))
private val defaultOwnerRole = ResourceRole(ManagedGroupService.adminRoleName, resourceActions)
private val defaultMemberRole = ResourceRole(ManagedGroupService.memberRoleName, Set.empty)
private val defaultAdminNotifierRole = ResourceRole(ManagedGroupService.adminNotifierRoleName, Set(ResourceAction("notify_admins")))
private val defaultRoles = Set(defaultOwnerRole, defaultMemberRole, defaultAdminNotifierRole)
private val managedGroupResourceType = ResourceType(ManagedGroupService.managedGroupTypeName, resourceActionPatterns, defaultRoles, ManagedGroupService.adminRoleName)
private val resourceTypes = Map(managedGroupResourceType.name -> managedGroupResourceType)
private val groupId = "foo"
private val defaultNewUser = UserInfo(OAuth2BearerToken("newToken"), WorkbenchUserId("NewGuy"), WorkbenchEmail("[email protected]"), 0)
private val defaultGoogleSubjectId = Option(GoogleSubjectId("NewGuy"))
def assertGroupDoesNotExist(samRoutes: SamRoutes, groupId: String = groupId): Unit = {
Get(s"/api/groups/v1/$groupId") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
def assertCreateGroup(samRoutes: SamRoutes, groupId: String = groupId): Unit = {
Post(s"/api/groups/v1/$groupId") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Created
}
}
def assertGetGroup(samRoutes: SamRoutes, groupId: String = groupId) = {
Get(s"/api/groups/v1/$groupId") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
}
}
def assertDeleteGroup(samRoutes: SamRoutes, groupId: String = groupId) = {
Delete(s"/api/groups/v1/$groupId") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
// Makes an anonymous object for a user acting on the same data as the user specified in samRoutes
def makeOtherUser(samRoutes: SamRoutes, userInfo: UserInfo = defaultNewUser) = new {
runAndWait(samRoutes.userService.createUser(WorkbenchUser(userInfo.userId, defaultGoogleSubjectId, userInfo.userEmail, None), samRequestContext))
val email = userInfo.userEmail
val routes = new TestSamRoutes(samRoutes.resourceService, samRoutes.policyEvaluatorService, samRoutes.userService, samRoutes.statusService, samRoutes.managedGroupService, userInfo, samRoutes.directoryDAO, samRoutes.registrationDAO)
}
def setGroupMembers(samRoutes: SamRoutes, members: Set[WorkbenchEmail], expectedStatus: StatusCode): Unit = {
Put(s"/api/groups/v1/$groupId/member", members) ~> samRoutes.route ~> check {
status shouldEqual expectedStatus
}
}
def withUserNotInGroup[T](defaultRoutes: SamRoutes)(body: TestSamRoutes => T): T = {
assertCreateGroup(defaultRoutes)
assertGetGroup(defaultRoutes)
val theDude = UserInfo(OAuth2BearerToken("tokenDude"), WorkbenchUserId("ElDudarino"), WorkbenchEmail("[email protected]"), 0)
defaultRoutes.directoryDAO.createUser(WorkbenchUser(theDude.userId, None, theDude.userEmail, None), samRequestContext).unsafeRunSync()
val dudesRoutes = new TestSamRoutes(defaultRoutes.resourceService, defaultRoutes.policyEvaluatorService, defaultRoutes.userService, defaultRoutes.statusService, defaultRoutes.managedGroupService, theDude, defaultRoutes.directoryDAO, defaultRoutes.registrationDAO)
body(dudesRoutes)
}
"GET /api/groups/v1/{groupName}" should "respond with 200 if the requesting user is in the admin policy for the group" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
assertGetGroup(samRoutes)
}
it should "respond with 200 if the requesting user is in the member policy for the group" in {
val samRoutes = createSamRoutesWithResource(resourceTypes, Resource(ManagedGroupService.managedGroupTypeName, Generator.genResourceId.sample.get, Set.empty))
val newGuyEmail = WorkbenchEmail("[email protected]")
val newGuy = UserInfo(OAuth2BearerToken("newToken"), WorkbenchUserId("NewGuy"), newGuyEmail, 0)
val newGuyRoutes = new TestSamRoutes(samRoutes.resourceService, samRoutes.policyEvaluatorService, samRoutes.userService, samRoutes.statusService, samRoutes.managedGroupService, newGuy, samRoutes.directoryDAO, samRoutes.registrationDAO)
assertCreateGroup(samRoutes = samRoutes)
assertGetGroup(samRoutes = samRoutes)
samRoutes.userService.createUser(WorkbenchUser(newGuy.userId, defaultGoogleSubjectId, newGuy.userEmail, None), samRequestContext).futureValue
setGroupMembers(samRoutes, Set(newGuyEmail), expectedStatus = StatusCodes.Created)
assertGetGroup(newGuyRoutes)
}
it should "respond with 404 if the group does not exist" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertGroupDoesNotExist(samRoutes)
}
it should "respond with 200 if the group exists but the user is not in the group" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val newGuyEmail = WorkbenchEmail("[email protected]")
val newGuy = UserInfo(OAuth2BearerToken("newToken"), WorkbenchUserId("NewGuy"), newGuyEmail, 0)
samRoutes.directoryDAO.createUser(WorkbenchUser(newGuy.userId, None, newGuyEmail, None), samRequestContext).unsafeRunSync()
val newGuyRoutes = new TestSamRoutes(samRoutes.resourceService, samRoutes.policyEvaluatorService, samRoutes.userService, samRoutes.statusService, samRoutes.managedGroupService, newGuy, samRoutes.mockDirectoryDao, samRoutes.mockRegistrationDao)
Get(s"/api/groups/v1/$groupId") ~> newGuyRoutes.route ~> check {
status shouldEqual StatusCodes.OK
}
}
"POST /api/groups/v1/{groupName}" should "respond 201 if the group did not already exist" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertGroupDoesNotExist(samRoutes)
assertCreateGroup(samRoutes)
}
it should "fail with a 409 if the group already exists" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
assertGetGroup(samRoutes)
Post(s"/api/groups/v1/$groupId") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Conflict
}
}
it should "fail with a 400 if the group name contains invalid characters" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
assertGetGroup(samRoutes)
val badGroupName = "bad$name"
Post(s"/api/groups/v1/$badGroupName") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.BadRequest
}
}
it should "fail with a 400 if the group name contains 64 or more characters" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
assertGetGroup(samRoutes)
val badGroupName = "X" * 61
Post(s"/api/groups/v1/$badGroupName") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.BadRequest
}
}
"DELETE /api/groups/v1/{groupName}" should "should respond with 204 when the group is successfully deleted" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
assertGetGroup(samRoutes)
assertDeleteGroup(samRoutes)
assertGroupDoesNotExist(samRoutes)
}
it should "fail with 404 if the authenticated user is not in the owner policy for the group" in {
val defaultRoutes = TestSamRoutes(resourceTypes)
    withUserNotInGroup(defaultRoutes) { nonMemberRoutes =>
      assertGetGroup(nonMemberRoutes)
      Delete(s"/api/groups/v1/$groupId") ~> nonMemberRoutes.route ~> check {
        status shouldEqual StatusCodes.NotFound
      }
      assertGetGroup(nonMemberRoutes)
      assertGetGroup(defaultRoutes)
    }
}
"GET /api/groups/v1/{groupName}/member" should "succeed with 200 when the group exists and the requesting user is in the group" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
Get(s"/api/groups/v1/$groupId/member") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[String] shouldEqual "[]"
}
}
it should "fail with 404 when the requesting user is a 'member' but not an 'admin'" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val newGuy = makeOtherUser(samRoutes)
setGroupMembers(samRoutes, Set(newGuy.email), expectedStatus = StatusCodes.Created)
Get(s"/api/groups/v1/$groupId/member") ~> newGuy.routes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
it should "fail with 404 when the requesting user is not in the group" in {
    withUserNotInGroup(TestSamRoutes(resourceTypes)) { nonMemberRoutes =>
      Get(s"/api/groups/v1/$groupId/member") ~> nonMemberRoutes.route ~> check {
        status shouldEqual StatusCodes.NotFound
      }
    }
}
it should "fail with 404 when the group does not exist" in {
val samRoutes = createSamRoutesWithResource(resourceTypes, Resource(ManagedGroupService.managedGroupTypeName, ResourceId("foo"), Set.empty))
Get(s"/api/groups/v1/$groupId/member") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
"GET /api/groups/v1/{groupName}/{policyName}" should "fail with 404 if policy name is not in [member, admin]" in {
val samRoutes = createSamRoutesWithResource(resourceTypes, Resource(ManagedGroupService.managedGroupTypeName, ResourceId("foo"), Set.empty))
assertCreateGroup(samRoutes)
Get(s"/api/groups/v1/$groupId/blah") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
responseAs[String] should include ("must be one of")
}
}
"PUT /api/groups/v1/{groupName}/member" should "fail with 400 when updating the 'member' policy of the group with a user who has not been created yet" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val newGuyEmail = WorkbenchEmail("[email protected]")
val members = Set(newGuyEmail)
Put(s"/api/groups/v1/$groupId/member", members) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.BadRequest
responseAs[String] should include (newGuyEmail.toString())
}
}
it should "succeed with 201 after successfully updating the 'member' policy of the group" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val newGuy = makeOtherUser(samRoutes)
setGroupMembers(samRoutes, Set(newGuy.email), expectedStatus = StatusCodes.Created)
}
it should "fail with 404 when the requesting user is not in the admin policy for the group" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val newGuy = makeOtherUser(samRoutes)
setGroupMembers(newGuy.routes, Set(newGuy.email), expectedStatus = StatusCodes.NotFound)
}
it should "fail with 404 when the group does not exist" in {
val samRoutes = createSamRoutesWithResource(resourceTypes, Resource(ManagedGroupService.managedGroupTypeName, ResourceId("foo"), Set.empty))
val newGuy = makeOtherUser(samRoutes)
setGroupMembers(samRoutes, Set(newGuy.email), expectedStatus = StatusCodes.NotFound)
}
it should "fail with 500 when any of the email addresses being added are invalid" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val newGuyEmail = WorkbenchEmail("I'm not an email address but I should be")
val members = Set(newGuyEmail)
Put(s"/api/groups/v1/$groupId/member", members) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.BadRequest
responseAs[String] should include (newGuyEmail.toString())
}
}
"GET /api/groups/v1/{groupName}/admin" should "succeed with 200 when the group exists and the requesting user is in the group" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
assertGetGroup(samRoutes)
Get(s"/api/groups/v1/$groupId/admin") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[String] should include (TestSamRoutes.defaultUserInfo.userEmail.value)
}
}
it should "fail with 404 when the requesting user is a 'member' but not an 'admin'" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val newGuy = makeOtherUser(samRoutes)
setGroupMembers(samRoutes, Set(newGuy.email), expectedStatus = StatusCodes.Created)
Get(s"/api/groups/v1/$groupId/admin") ~> newGuy.routes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
it should "fail with 404 when the requesting user is not in the group" in {
    withUserNotInGroup(TestSamRoutes(resourceTypes)) { nonMemberRoutes =>
      Get(s"/api/groups/v1/$groupId/admin") ~> nonMemberRoutes.route ~> check {
        status shouldEqual StatusCodes.NotFound
      }
    }
}
it should "fail with 404 when the group does not exist" in {
val samRoutes = createSamRoutesWithResource(resourceTypes, Resource(ManagedGroupService.managedGroupTypeName, ResourceId("foo"), Set.empty))
Get(s"/api/groups/v1/$groupId/admin") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
"PUT /api/groups/v1/{groupName}/admin" should "fail with 400 when updating the 'admin' policy of the group with a user who has not been created yet" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val newGuyEmail = WorkbenchEmail("[email protected]")
val members = Set(newGuyEmail)
Put(s"/api/groups/v1/$groupId/admin", members) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.BadRequest
responseAs[String] should include (newGuyEmail.toString())
}
}
it should "succeed with 201 after successfully updating the 'admin' policy of the group" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val newGuy = makeOtherUser(samRoutes)
val members = Set(newGuy.email)
Put(s"/api/groups/v1/$groupId/admin", members) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Created
}
}
it should "fail with 404 when the requesting user is not in the admin policy for the group" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val newGuy = makeOtherUser(samRoutes)
val members = Set(newGuy.email)
Put(s"/api/groups/v1/$groupId/admin", members) ~> newGuy.routes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
it should "fail with 404 when the group does not exist" in {
val samRoutes = createSamRoutesWithResource(resourceTypes, Resource(ManagedGroupService.managedGroupTypeName, ResourceId("foo"), Set.empty))
val newGuyEmail = WorkbenchEmail("[email protected]")
val members = Set(newGuyEmail)
Put(s"/api/groups/v1/$groupId/admin", members) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
it should "fail with 500 when any of the email addresses being added are invalid" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val newGuyEmail = WorkbenchEmail("An Invalid email address")
val members = Set(newGuyEmail)
Put(s"/api/groups/v1/$groupId/admin", members) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.BadRequest
responseAs[String] should include (newGuyEmail.toString())
}
}
"PUT /api/groups/v1/{groupName}/{policyName}/{email}" should "respond with 204 and add the email address to the specified group and policy" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val newGuy = makeOtherUser(samRoutes)
Put(s"/api/groups/v1/$groupId/admin/${newGuy.email}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
it should "respond with 204 when the email address is already in the group and policy" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val defaultUserInfo = samRoutes.userInfo
Put(s"/api/groups/v1/$groupId/admin/${defaultUserInfo.userEmail}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
it should "respond with 404 when the group does not exist" in {
val samRoutes = createSamRoutesWithResource(resourceTypes, Resource(ManagedGroupService.managedGroupTypeName, ResourceId("foo"), Set.empty))
val newGuy = makeOtherUser(samRoutes)
Put(s"/api/groups/v1/$groupId/admin/${newGuy.email}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
it should "respond with 400 when the email address is invalid" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val notAnEmail = "NotAnEmailAddress"
Put(s"/api/groups/v1/$groupId/admin/$notAnEmail") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.BadRequest
}
}
it should "respond with 404 when the policy is invalid" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val newGuy = makeOtherUser(samRoutes)
Put(s"/api/groups/v1/$groupId/xmen/${newGuy.email}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
it should "respond with 404 when the requesting user does not have any permissions in the group and policy" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val newGuy = makeOtherUser(samRoutes)
Put(s"/api/groups/v1/$groupId/admin/${samRoutes.userInfo.userEmail}") ~> newGuy.routes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
// TODO: In order to be able to delete the subject, they need to exist in opendj. Is this what we want?
"DELETE /api/groups/v1/{groupName}/{policyName}/{email}" should "respond with 204 and remove the email address from the specified group and policy" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
Delete(s"/api/groups/v1/$groupId/admin/${samRoutes.userInfo.userEmail}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
// TODO: I think this should just work and give back a 204
// TODO: well i changed something and now it returns a 204 so maybe this TODO above is complete? Must investigate...
it should "respond with 404 when the email address was already not present in the group and policy" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
Delete(s"/api/groups/v1/$groupId/admin/${samRoutes.userInfo.userEmail}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
it should "respond with 404 when the group does not exist" in {
val samRoutes = createSamRoutesWithResource(resourceTypes, Resource(ManagedGroupService.managedGroupTypeName, ResourceId("foo"), Set.empty))
Delete(s"/api/groups/v1/$groupId/admin/${samRoutes.userInfo.userEmail}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
it should "respond with 404 when the policy is invalid" in {
val samRoutes = createSamRoutesWithResource(resourceTypes, Resource(ManagedGroupService.managedGroupTypeName, Generator.genResourceId.sample.get, Set.empty))
assertCreateGroup(samRoutes)
Delete(s"/api/groups/v1/$groupId/people/${samRoutes.userInfo.userEmail}") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
it should "respond with 404 when the requesting user does not have permissions to edit the group and policy" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val newGuy = makeOtherUser(samRoutes)
Delete(s"/api/groups/v1/$groupId/admin/${samRoutes.userInfo.userEmail}") ~> newGuy.routes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
"GET /api/groups/v1" should "respond with 200 and a list of managed groups the authenticated user belongs to" in {
val samRoutes = TestSamRoutes(resourceTypes)
val groupNames = Set("foo", "bar", "baz")
groupNames.foreach(assertCreateGroup(samRoutes, _))
Get("/api/groups/v1") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
val res = responseAs[String]
groupNames.foreach(res should include (_))
res should include ("admin")
res shouldNot include ("member")
}
}
it should "respond with 200 and an empty list if the user is not a member of any managed groups" in {
val samRoutes = TestSamRoutes(resourceTypes)
Get("/api/groups/v1") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[String] shouldEqual "[]"
}
}
"GET /api/groups/v1/{groupName}/admin-notifier" should "succeed with 200 when the group exists and the requesting user is a group admin" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
assertGetGroup(samRoutes)
Get(s"/api/groups/v1/$groupId/admin-notifier") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
}
}
it should "fail with 404 when the requesting user is a 'member' but not an 'admin'" in {
val samRoutes: TestSamRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val newGuy = makeOtherUser(samRoutes)
setGroupMembers(samRoutes, Set(newGuy.email), expectedStatus = StatusCodes.Created)
Get(s"/api/groups/v1/$groupId/admin-notifier") ~> newGuy.routes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
it should "fail with 404 when the requesting user is not in the group" in {
withUserNotInGroup(TestSamRoutes(resourceTypes)) { nonMemberRoutes =>
Get(s"/api/groups/v1/$groupId/admin-notifier") ~> nonMemberRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
}
it should "fail with 404 when the group does not exist" in {
val samRoutes = createSamRoutesWithResource(resourceTypes, Resource(ManagedGroupService.managedGroupTypeName, ResourceId("foo"), Set.empty))
Get(s"/api/groups/v1/$groupId/admin") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
"PUT /api/groups/v1/{groupName}/admin-notifier" should "succeed with 201 after successfully updating the 'admin-notifier' policy" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val newGuy = makeOtherUser(samRoutes)
Put(s"/api/groups/v1/$groupId/admin-notifier", Set(newGuy.email)) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.Created
}
}
it should "fail with 404 when the requesting user is not in the admin policy for the group" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val newGuy = makeOtherUser(samRoutes)
Put(s"/api/groups/v1/$groupId/admin-notifier", Set(newGuy.email)) ~> newGuy.routes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
"PUT /api/groups/v1/{groupName}/accessInstructions" should "succeed with 204" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val instructions = "Test instructions"
Put(s"/api/groups/v1/$groupId/accessInstructions", ManagedGroupAccessInstructions(instructions)) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
"GET /api/groups/v1/{groupName}/accessInstructions" should "succeed with 200 and return the access instructions when group and access instructions exist" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val instructions = "Test instructions"
Put(s"/api/groups/v1/$groupId/accessInstructions", ManagedGroupAccessInstructions(instructions)) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
Get(s"/api/groups/v1/$groupId/accessInstructions") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.OK
responseAs[String] shouldEqual (instructions)
}
}
it should "succeed with 204 when the group exists but access instructions are not set" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
Get(s"/api/groups/v1/$groupId/accessInstructions") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
it should "fail with 404 when the group does not exist" in {
val samRoutes = TestSamRoutes(resourceTypes)
Get(s"/api/groups/v1/$groupId/accessInstructions") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
"POST /api/groups/v1/{groupName}/requestAccess" should "succeed with 204 when the group exists" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
Post(s"/api/groups/v1/$groupId/requestAccess") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
}
it should "fail with 400 if there are access instructions" in {
val samRoutes = TestSamRoutes(resourceTypes)
assertCreateGroup(samRoutes)
val instructions = "Test instructions"
Put(s"/api/groups/v1/$groupId/accessInstructions", ManagedGroupAccessInstructions(instructions)) ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NoContent
}
Post(s"/api/groups/v1/$groupId/requestAccess") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.BadRequest
responseAs[String] should include (instructions)
}
}
it should "fail with 404 when group does not exist" in {
val samRoutes = createSamRoutesWithResource(resourceTypes, Resource(ManagedGroupService.managedGroupTypeName, ResourceId("foo"), Set.empty))
Post(s"/api/groups/v1/$groupId/requestAccess") ~> samRoutes.route ~> check {
status shouldEqual StatusCodes.NotFound
}
}
}
|
broadinstitute/sam
|
src/test/scala/org/broadinstitute/dsde/workbench/sam/api/ManagedGroupRoutesV1Spec.scala
|
Scala
|
bsd-3-clause
| 27,690 |
package play.api.db
import scala.language.reflectiveCalls
import play.api._
import play.api.libs._
import play.core._
import java.sql._
import javax.sql._
import com.jolbox.bonecp._
import com.jolbox.bonecp.hooks._
import scala.util.control.{ NonFatal, ControlThrowable }
/**
* The Play Database API manages several connection pools.
*
* @param datasources the managed data sources
*/
trait DBApi {
val datasources: List[(DataSource, String)]
/**
 * Shuts down the connection pool for the given data source
*/
def shutdownPool(ds: DataSource)
/**
 * Retrieves a registered data source.
 *
 * @param name the data source name
 * @return the data source registered under the given name
* @throws an error if the required data source is not registered
*/
def getDataSource(name: String): DataSource
/**
* Retrieves the JDBC connection URL for a particular data source.
*
* @param name the data source name
* @return The JDBC URL connection string, i.e. `jdbc:...`
* @throws an error if the required data source is not registered
*/
def getDataSourceURL(name: String): String = {
val connection = getDataSource(name).getConnection
val url = connection.getMetaData.getURL
connection.close()
url
}
/**
* Retrieves a JDBC connection.
*
* Don't forget to release the connection at some point by calling close().
*
* @param name the data source name
* @param autocommit when `true`, sets this connection to auto-commit
* @return a JDBC connection
* @throws an error if the required data source is not registered
*/
def getConnection(name: String, autocommit: Boolean = true): Connection = {
val connection = getDataSource(name).getConnection
connection.setAutoCommit(autocommit)
connection
}
/**
* Execute a block of code, providing a JDBC connection. The connection and all created statements are
* automatically released.
*
* @param name The datasource name.
* @param block Code block to execute.
*/
def withConnection[A](name: String)(block: Connection => A): A = {
val connection = new AutoCleanConnection(getConnection(name))
try {
block(connection)
} finally {
connection.close()
}
}
/**
* Execute a block of code, in the scope of a JDBC transaction.
* The connection and all created statements are automatically released.
* The transaction is automatically committed, unless an exception occurs.
*
* @param name The datasource name.
* @param block Code block to execute.
*/
def withTransaction[A](name: String)(block: Connection => A): A = {
withConnection(name) { connection =>
try {
connection.setAutoCommit(false)
val r = block(connection)
connection.commit()
r
} catch {
case e: ControlThrowable => connection.commit(); throw e
case NonFatal(e) => connection.rollback(); throw e
}
}
}
}
/**
* Provides a high-level API for getting JDBC connections.
*
* For example:
* {{{
* val conn = DB.getConnection("customers")
* }}}
*/
object DB {
/** The exception we are throwing. */
private def error = throw new Exception("DB plugin is not registered.")
/**
* Retrieves a JDBC connection.
*
* @param name data source name
* @param autocommit when `true`, sets this connection to auto-commit
* @return a JDBC connection
* @throws an error if the required data source is not registered
*/
def getConnection(name: String = "default", autocommit: Boolean = true)(implicit app: Application): Connection = app.plugin[DBPlugin].map(_.api.getConnection(name, autocommit)).getOrElse(error)
/**
 * Retrieves a JDBC data source.
 *
 * @param name data source name
 * @return the data source registered under the given name
* @throws an error if the required data source is not registered
*/
def getDataSource(name: String = "default")(implicit app: Application): DataSource = app.plugin[DBPlugin].map(_.api.getDataSource(name)).getOrElse(error)
/**
* Execute a block of code, providing a JDBC connection. The connection is
* automatically released.
*
* @param name The datasource name.
* @param block Code block to execute.
*/
def withConnection[A](name: String)(block: Connection => A)(implicit app: Application): A = {
app.plugin[DBPlugin].map(_.api.withConnection(name)(block)).getOrElse(error)
}
/**
* Execute a block of code, providing a JDBC connection. The connection and all created statements are
* automatically released.
*
* @param block Code block to execute.
*/
def withConnection[A](block: Connection => A)(implicit app: Application): A = {
app.plugin[DBPlugin].map(_.api.withConnection("default")(block)).getOrElse(error)
}
/**
* Execute a block of code, in the scope of a JDBC transaction.
* The connection and all created statements are automatically released.
* The transaction is automatically committed, unless an exception occurs.
*
* @param name The datasource name.
* @param block Code block to execute.
*/
def withTransaction[A](name: String = "default")(block: Connection => A)(implicit app: Application): A = {
app.plugin[DBPlugin].map(_.api.withTransaction(name)(block)).getOrElse(error)
}
/**
* Execute a block of code, in the scope of a JDBC transaction.
* The connection and all created statements are automatically released.
* The transaction is automatically committed, unless an exception occurs.
*
* @param block Code block to execute.
*/
def withTransaction[A](block: Connection => A)(implicit app: Application): A = {
app.plugin[DBPlugin].map(_.api.withTransaction("default")(block)).getOrElse(error)
}
}
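// Illustrative usage sketch, not part of the original file: the intended use of `DB.withTransaction`
// above. Both updates commit together, or both roll back if the block throws; the table and column
// names are invented for the example.
private object DBUsageSketch {
  def transfer(from: Long, to: Long, amount: Long)(implicit app: Application): Int =
    DB.withTransaction { connection =>
      val debit = connection.prepareStatement("update accounts set balance = balance - ? where id = ?")
      debit.setLong(1, amount)
      debit.setLong(2, from)
      debit.executeUpdate()
      val credit = connection.prepareStatement("update accounts set balance = balance + ? where id = ?")
      credit.setLong(1, amount)
      credit.setLong(2, to)
      credit.executeUpdate()
    }
}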
/**
* Generic DBPlugin interface
*/
trait DBPlugin extends Plugin {
def api: DBApi
}
/**
* A DBPlugin implementation that provides a DBApi
*
* @param app the application that is registering the plugin
*/
class BoneCPPlugin(app: Application) extends DBPlugin {
private def error = throw new Exception("db keys are missing from application.conf")
lazy val dbConfig = app.configuration.getConfig("db").getOrElse(Configuration.empty)
private def dbURL(conn: Connection): String = {
val u = conn.getMetaData.getURL
conn.close()
u
}
// should be accessed in onStart first
private lazy val dbApi: DBApi = new BoneCPApi(dbConfig, app.classloader)
/**
* plugin is disabled if either configuration is missing or the plugin is explicitly disabled
*/
private lazy val isDisabled = {
app.configuration.getString("dbplugin").filter(_ == "disabled").isDefined || dbConfig.subKeys.isEmpty
}
/**
* Is this plugin enabled.
*
* {{{
* dbplugin=disabled
* }}}
*/
override def enabled = isDisabled == false
/**
* Retrieves the underlying `DBApi` managing the data sources.
*/
def api: DBApi = dbApi
/**
* Reads the configuration and connects to every data source.
*/
override def onStart() {
// Try to connect to each, this should be the first access to dbApi
dbApi.datasources.map { ds =>
try {
ds._1.getConnection.close()
app.mode match {
case Mode.Test =>
case mode => Play.logger.info("database [" + ds._2 + "] connected at " + dbURL(ds._1.getConnection))
}
} catch {
case NonFatal(e) => {
throw dbConfig.reportError(ds._2 + ".url", "Cannot connect to database [" + ds._2 + "]", Some(e.getCause))
}
}
}
}
/**
* Closes all data sources.
*/
override def onStop() {
dbApi.datasources.foreach {
case (ds, _) => try {
dbApi.shutdownPool(ds)
} catch { case NonFatal(_) => }
}
val drivers = DriverManager.getDrivers()
while (drivers.hasMoreElements) {
val driver = drivers.nextElement
DriverManager.deregisterDriver(driver)
}
}
}
private[db] class BoneCPApi(configuration: Configuration, classloader: ClassLoader) extends DBApi {
private def error(db: String, message: String = "") = throw configuration.reportError(db, message)
private val dbNames = configuration.subKeys
private def register(driver: String, c: Configuration) {
try {
DriverManager.registerDriver(new play.utils.ProxyDriver(Class.forName(driver, true, classloader).newInstance.asInstanceOf[Driver]))
} catch {
case NonFatal(e) => throw c.reportError("driver", "Driver not found: [" + driver + "]", Some(e))
}
}
private def createDataSource(dbName: String, url: String, driver: String, conf: Configuration): DataSource = {
val datasource = new BoneCPDataSource
// Try to load the driver
conf.getString("driver").map { driver =>
try {
DriverManager.registerDriver(new play.utils.ProxyDriver(Class.forName(driver, true, classloader).newInstance.asInstanceOf[Driver]))
} catch {
case NonFatal(e) => throw conf.reportError("driver", "Driver not found: [" + driver + "]", Some(e))
}
}
val autocommit = conf.getBoolean("autocommit").getOrElse(true)
val isolation = conf.getString("isolation").map {
case "NONE" => Connection.TRANSACTION_NONE
case "READ_COMMITTED" => Connection.TRANSACTION_READ_COMMITTED
case "READ_UNCOMMITTED " => Connection.TRANSACTION_READ_UNCOMMITTED
case "REPEATABLE_READ " => Connection.TRANSACTION_REPEATABLE_READ
case "SERIALIZABLE" => Connection.TRANSACTION_SERIALIZABLE
case unknown => throw conf.reportError("isolation", "Unknown isolation level [" + unknown + "]")
}
val catalog = conf.getString("defaultCatalog")
val readOnly = conf.getBoolean("readOnly").getOrElse(false)
datasource.setClassLoader(classloader)
val logger = Logger("com.jolbox.bonecp")
// Re-apply per connection config @ checkout
datasource.setConnectionHook(new AbstractConnectionHook {
override def onCheckIn(connection: ConnectionHandle) {
if (logger.isTraceEnabled) {
logger.trace("Check in connection %s [%s leased]".format(connection.toString, datasource.getTotalLeased))
}
}
override def onCheckOut(connection: ConnectionHandle) {
connection.setAutoCommit(autocommit)
isolation.map(connection.setTransactionIsolation(_))
connection.setReadOnly(readOnly)
catalog.map(connection.setCatalog(_))
if (logger.isTraceEnabled) {
logger.trace("Check out connection %s [%s leased]".format(connection.toString, datasource.getTotalLeased))
}
}
})
    val PostgresFullUrl = "^postgres://([a-zA-Z0-9_]+):([^@]+)@([^/]+)/([^\\s]+)$".r
    val MysqlFullUrl = "^mysql://([a-zA-Z0-9_]+):([^@]+)@([^/]+)/([^\\s]+)$".r
    val MysqlCustomProperties = ".*\\?(.*)".r
val H2DefaultUrl = "^jdbc:h2:mem:.+".r
conf.getString("url") match {
case Some(PostgresFullUrl(username, password, host, dbname)) =>
datasource.setJdbcUrl("jdbc:postgresql://%s/%s".format(host, dbname))
datasource.setUsername(username)
datasource.setPassword(password)
case Some(url @ MysqlFullUrl(username, password, host, dbname)) =>
val defaultProperties = """?useUnicode=yes&characterEncoding=UTF-8&connectionCollation=utf8_general_ci"""
val addDefaultPropertiesIfNeeded = MysqlCustomProperties.findFirstMatchIn(url).map(_ => "").getOrElse(defaultProperties)
datasource.setJdbcUrl("jdbc:mysql://%s/%s".format(host, dbname + addDefaultPropertiesIfNeeded))
datasource.setUsername(username)
datasource.setPassword(password)
case Some(url @ H2DefaultUrl()) if !url.contains("DB_CLOSE_DELAY") =>
if (Play.maybeApplication.exists(_.mode == Mode.Dev)) {
datasource.setJdbcUrl(url + ";DB_CLOSE_DELAY=-1")
} else {
datasource.setJdbcUrl(url)
}
case Some(s: String) =>
datasource.setJdbcUrl(s)
case _ =>
throw conf.globalError("Missing url configuration for database [%s]".format(conf))
}
conf.getString("user").map(datasource.setUsername(_))
conf.getString("pass").map(datasource.setPassword(_))
conf.getString("password").map(datasource.setPassword(_))
// Pool configuration
datasource.setPartitionCount(conf.getInt("partitionCount").getOrElse(1))
datasource.setMaxConnectionsPerPartition(conf.getInt("maxConnectionsPerPartition").getOrElse(30))
datasource.setMinConnectionsPerPartition(conf.getInt("minConnectionsPerPartition").getOrElse(5))
datasource.setAcquireIncrement(conf.getInt("acquireIncrement").getOrElse(1))
datasource.setAcquireRetryAttempts(conf.getInt("acquireRetryAttempts").getOrElse(10))
datasource.setAcquireRetryDelayInMs(conf.getMilliseconds("acquireRetryDelay").getOrElse(1000))
datasource.setConnectionTimeoutInMs(conf.getMilliseconds("connectionTimeout").getOrElse(1000))
datasource.setIdleMaxAge(conf.getMilliseconds("idleMaxAge").getOrElse(1000 * 60 * 10), java.util.concurrent.TimeUnit.MILLISECONDS)
datasource.setMaxConnectionAge(conf.getMilliseconds("maxConnectionAge").getOrElse(1000 * 60 * 60), java.util.concurrent.TimeUnit.MILLISECONDS)
datasource.setDisableJMX(conf.getBoolean("disableJMX").getOrElse(true))
datasource.setStatisticsEnabled(conf.getBoolean("statisticsEnabled").getOrElse(false))
datasource.setIdleConnectionTestPeriod(conf.getMilliseconds("idleConnectionTestPeriod").getOrElse(1000 * 60), java.util.concurrent.TimeUnit.MILLISECONDS)
datasource.setDisableConnectionTracking(conf.getBoolean("disableConnectionTracking").getOrElse(true))
conf.getString("initSQL").map(datasource.setInitSQL(_))
conf.getBoolean("logStatements").map(datasource.setLogStatementsEnabled(_))
conf.getString("connectionTestStatement").map(datasource.setConnectionTestStatement(_))
// Bind in JNDI
conf.getString("jndiName").map { name =>
JNDI.initialContext.rebind(name, datasource)
Play.logger.info("datasource [" + conf.getString("url").get + "] bound to JNDI as " + name)
}
datasource
}
val datasources: List[Tuple2[DataSource, String]] = dbNames.map { dbName =>
val url = configuration.getString(dbName + ".url").getOrElse(error(dbName, "Missing configuration [db." + dbName + ".url]"))
val driver = configuration.getString(dbName + ".driver").getOrElse(error(dbName, "Missing configuration [db." + dbName + ".driver]"))
val extraConfig = configuration.getConfig(dbName).getOrElse(error(dbName, "Missing configuration [db." + dbName + "]"))
register(driver, extraConfig)
createDataSource(dbName, url, driver, extraConfig) -> dbName
}.toList
def shutdownPool(ds: DataSource) = {
ds match {
case ds: BoneCPDataSource => ds.close()
case _ => error(" - could not recognize DataSource, therefore unable to shutdown this pool")
}
}
/**
 * Retrieves a registered data source by name.
 *
 * @param name the data source name
 * @return the data source registered under the given name
* @throws an error if the required data source is not registered
*/
def getDataSource(name: String): DataSource = {
datasources.filter(_._2 == name).headOption.map(e => e._1).getOrElse(error(" - could not find datasource for " + name))
}
}
/**
* Provides an interface for retreiving the jdbc driver's implementation of java.sql.Connection
* from a "decorated" Connection (such as the Connection that DB.withConnection provides). Upcasting
* to this trait should be used with caution since exposing the internal jdbc connection can violate the
* guarantees Play otherwise makes (like automatically closing jdbc statements created from the connection)
*/
trait HasInternalConnection {
def getInternalConnection(): Connection
}
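// Illustrative usage sketch, not part of the original file: recovering the driver-level connection
// from a decorated one, as described above, falling back to the connection itself when it is not
// decorated.
private object InternalConnectionSketch {
  def driverConnection(connection: Connection): Connection = connection match {
    case decorated: HasInternalConnection => decorated.getInternalConnection()
    case plain => plain
  }
}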
/**
* A connection that automatically releases statements on close
*/
private class AutoCleanConnection(connection: Connection) extends Connection with HasInternalConnection {
private val statements = scala.collection.mutable.ListBuffer.empty[Statement]
private def registering[T <: Statement](b: => T) = {
val statement = b
statements += statement
statement
}
private def releaseStatements() {
statements.foreach { statement =>
statement.close()
}
statements.clear()
}
override def getInternalConnection(): Connection = connection match {
case bonecpConn: com.jolbox.bonecp.ConnectionHandle =>
bonecpConn.getInternalConnection()
case x => x
}
def createStatement() = registering(connection.createStatement())
def createStatement(resultSetType: Int, resultSetConcurrency: Int) = registering(connection.createStatement(resultSetType, resultSetConcurrency))
def createStatement(resultSetType: Int, resultSetConcurrency: Int, resultSetHoldability: Int) = registering(connection.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability))
def prepareStatement(sql: String) = registering(connection.prepareStatement(sql))
def prepareStatement(sql: String, autoGeneratedKeys: Int) = registering(connection.prepareStatement(sql, autoGeneratedKeys))
def prepareStatement(sql: String, columnIndexes: scala.Array[Int]) = registering(connection.prepareStatement(sql, columnIndexes))
def prepareStatement(sql: String, resultSetType: Int, resultSetConcurrency: Int) = registering(connection.prepareStatement(sql, resultSetType, resultSetConcurrency))
def prepareStatement(sql: String, resultSetType: Int, resultSetConcurrency: Int, resultSetHoldability: Int) = registering(connection.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability))
def prepareStatement(sql: String, columnNames: scala.Array[String]) = registering(connection.prepareStatement(sql, columnNames))
def prepareCall(sql: String) = registering(connection.prepareCall(sql))
def prepareCall(sql: String, resultSetType: Int, resultSetConcurrency: Int) = registering(connection.prepareCall(sql, resultSetType, resultSetConcurrency))
def prepareCall(sql: String, resultSetType: Int, resultSetConcurrency: Int, resultSetHoldability: Int) = registering(connection.prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability))
def close() {
releaseStatements()
connection.close()
}
def clearWarnings() { connection.clearWarnings() }
def commit() { connection.commit() }
def createArrayOf(typeName: String, elements: scala.Array[AnyRef]) = connection.createArrayOf(typeName, elements)
def createBlob() = connection.createBlob()
def createClob() = connection.createClob()
def createNClob() = connection.createNClob()
def createSQLXML() = connection.createSQLXML()
def createStruct(typeName: String, attributes: scala.Array[AnyRef]) = connection.createStruct(typeName, attributes)
def getAutoCommit() = connection.getAutoCommit()
def getCatalog() = connection.getCatalog()
def getClientInfo() = connection.getClientInfo()
def getClientInfo(name: String) = connection.getClientInfo(name)
def getHoldability() = connection.getHoldability()
def getMetaData() = connection.getMetaData()
def getTransactionIsolation() = connection.getTransactionIsolation()
def getTypeMap() = connection.getTypeMap()
def getWarnings() = connection.getWarnings()
def isClosed() = connection.isClosed()
def isReadOnly() = connection.isReadOnly()
def isValid(timeout: Int) = connection.isValid(timeout)
def nativeSQL(sql: String) = connection.nativeSQL(sql)
def releaseSavepoint(savepoint: Savepoint) { connection.releaseSavepoint(savepoint) }
def rollback() { connection.rollback() }
def rollback(savepoint: Savepoint) { connection.rollback(savepoint) }
def setAutoCommit(autoCommit: Boolean) { connection.setAutoCommit(autoCommit) }
def setCatalog(catalog: String) { connection.setCatalog(catalog) }
def setClientInfo(properties: java.util.Properties) { connection.setClientInfo(properties) }
def setClientInfo(name: String, value: String) { connection.setClientInfo(name, value) }
def setHoldability(holdability: Int) { connection.setHoldability(holdability) }
def setReadOnly(readOnly: Boolean) { connection.setReadOnly(readOnly) }
def setSavepoint() = connection.setSavepoint()
def setSavepoint(name: String) = connection.setSavepoint(name)
def setTransactionIsolation(level: Int) { connection.setTransactionIsolation(level) }
def setTypeMap(map: java.util.Map[String, Class[_]]) { connection.setTypeMap(map) }
def isWrapperFor(iface: Class[_]) = connection.isWrapperFor(iface)
def unwrap[T](iface: Class[T]) = connection.unwrap(iface)
// JDBC 4.1
def getSchema() = {
connection.asInstanceOf[{ def getSchema(): String }].getSchema()
}
def setSchema(schema: String) {
connection.asInstanceOf[{ def setSchema(schema: String): Unit }].setSchema(schema)
}
def getNetworkTimeout() = {
connection.asInstanceOf[{ def getNetworkTimeout(): Int }].getNetworkTimeout()
}
def setNetworkTimeout(executor: java.util.concurrent.Executor, milliseconds: Int) {
connection.asInstanceOf[{ def setNetworkTimeout(executor: java.util.concurrent.Executor, milliseconds: Int): Unit }].setNetworkTimeout(executor, milliseconds)
}
def abort(executor: java.util.concurrent.Executor) {
connection.asInstanceOf[{ def abort(executor: java.util.concurrent.Executor): Unit }].abort(executor)
}
}
|
vangav/vos_backend
|
play-2.2.6/framework/src/play-jdbc/src/main/scala/play/api/db/DB.scala
|
Scala
|
mit
| 21,505 |
// @SOURCE:D:/git/trask/glowroot/agent-parent/plugins/play-plugin/tmp-router-files/conf/routes
// @HASH:0e9301dd33213e860c252067b6753b9f2381dcdd
// @DATE:Sat Apr 09 16:55:08 PDT 2016
import Routes.{prefix => _prefix, defaultPrefix => _defaultPrefix}
import play.core._
import play.core.Router._
import play.core.j._
import play.api.mvc._
import play.libs.F
import Router.queryString
// @LINE:9
// @LINE:8
// @LINE:7
// @LINE:6
// @LINE:5
package controllers {
// @LINE:8
class ReverseAssets {
// @LINE:8
def at(file:String): Call = {
Call("GET", _prefix + { _defaultPrefix } + "assets/" + implicitly[PathBindable[String]].unbind("file", file))
}
}
// @LINE:9
class ReverseBadController {
// @LINE:9
def bad(): Call = {
Call("GET", _prefix + { _defaultPrefix } + "bad")
}
}
// @LINE:5
class ReverseHomeController {
// @LINE:5
def index(): Call = {
Call("GET", _prefix)
}
}
// @LINE:6
class ReverseAsyncController {
// @LINE:6
def message(): Call = {
Call("GET", _prefix + { _defaultPrefix } + "message")
}
}
// @LINE:7
class ReverseStreamController {
// @LINE:7
def stream(): Call = {
Call("GET", _prefix + { _defaultPrefix } + "stream")
}
}
}
// @LINE:9
// @LINE:8
// @LINE:7
// @LINE:6
// @LINE:5
package controllers.javascript {
// @LINE:8
class ReverseAssets {
// @LINE:8
def at : JavascriptReverseRoute = JavascriptReverseRoute(
"controllers.Assets.at",
"""
function(file) {
return _wA({method:"GET", url:"""" + _prefix + { _defaultPrefix } + """" + "assets/" + (""" + implicitly[PathBindable[String]].javascriptUnbind + """)("file", file)})
}
"""
)
}
// @LINE:9
class ReverseBadController {
// @LINE:9
def bad : JavascriptReverseRoute = JavascriptReverseRoute(
"controllers.BadController.bad",
"""
function() {
return _wA({method:"GET", url:"""" + _prefix + { _defaultPrefix } + """" + "bad"})
}
"""
)
}
// @LINE:5
class ReverseHomeController {
// @LINE:5
def index : JavascriptReverseRoute = JavascriptReverseRoute(
"controllers.HomeController.index",
"""
function() {
return _wA({method:"GET", url:"""" + _prefix + """"})
}
"""
)
}
// @LINE:6
class ReverseAsyncController {
// @LINE:6
def message : JavascriptReverseRoute = JavascriptReverseRoute(
"controllers.AsyncController.message",
"""
function() {
return _wA({method:"GET", url:"""" + _prefix + { _defaultPrefix } + """" + "message"})
}
"""
)
}
// @LINE:7
class ReverseStreamController {
// @LINE:7
def stream : JavascriptReverseRoute = JavascriptReverseRoute(
"controllers.StreamController.stream",
"""
function() {
return _wA({method:"GET", url:"""" + _prefix + { _defaultPrefix } + """" + "stream"})
}
"""
)
}
}
// @LINE:9
// @LINE:8
// @LINE:7
// @LINE:6
// @LINE:5
package controllers.ref {
// @LINE:8
class ReverseAssets {
// @LINE:8
def at(path:String, file:String): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
controllers.Assets.at(path, file), HandlerDef(this, "controllers.Assets", "at", Seq(classOf[String], classOf[String]), "GET", """""", _prefix + """assets/$file<.+>""")
)
}
// @LINE:9
class ReverseBadController {
// @LINE:9
def bad(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
controllers.BadController.bad(), HandlerDef(this, "controllers.BadController", "bad", Seq(), "GET", """""", _prefix + """bad""")
)
}
// @LINE:5
class ReverseHomeController {
// @LINE:5
def index(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
controllers.HomeController.index(), HandlerDef(this, "controllers.HomeController", "index", Seq(), "GET", """""", _prefix + """""")
)
}
// @LINE:6
class ReverseAsyncController {
// @LINE:6
def message(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
controllers.AsyncController.message(), HandlerDef(this, "controllers.AsyncController", "message", Seq(), "GET", """""", _prefix + """message""")
)
}
// @LINE:7
class ReverseStreamController {
// @LINE:7
def stream(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
controllers.StreamController.stream(), HandlerDef(this, "controllers.StreamController", "stream", Seq(), "GET", """""", _prefix + """stream""")
)
}
}
|
trask/glowroot
|
agent/plugins/play-plugin/src/test/app-2.2.x-java/scala/routes_reverseRouting.scala
|
Scala
|
apache-2.0
| 5,256 |
/*
* Copyright (c) 2018. Fengguo Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License v2.0
* which accompanies this distribution, and is available at
* https://www.apache.org/licenses/LICENSE-2.0
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.jawa.core.compiler.interactive
/** Typical interaction, given a predicate <user-input>, a function <display>,
* and an exception handler <handle>:
*
* val TIMEOUT = 100 // (milliseconds) or something like that
* val r = new Response()
* while (!r.isComplete && !r.isCancelled) {
* if (<user-input>) r.cancel()
* else r.get(TIMEOUT) match {
* case Some(Left(data)) => <display>(data)
* case Some(Right(exc)) => <handle>(exc)
* case None =>
* }
* }
*/
class Response[T] {
private var data: Option[Either[T, Throwable]] = None
private var complete = false
private var cancelled = false
/** Set provisional data, more to come
*/
def setProvisionally(x: T): Unit = synchronized {
data = Some(Left(x))
}
/** Set final data, and mark response as complete.
*/
def set(x: T): Unit = synchronized {
data = Some(Left(x))
complete = true
notifyAll()
}
/** Store raised exception in data, and mark response as complete.
*/
def raise(exc: Throwable): Unit = synchronized {
data = Some(Right(exc))
complete = true
notifyAll()
}
/** Get final data, wait as long as necessary.
* When interrupted will return with Right(InterruptedException)
*/
def get: Either[T, Throwable] = synchronized {
while (!complete) {
try {
wait()
} catch {
case exc: InterruptedException =>
Thread.currentThread().interrupt()
raise(exc)
}
}
data.get
}
/** Optionally get data within `timeout` milliseconds.
* When interrupted will return with Some(Right(InterruptedException))
* When timeout ends, will return last stored provisional result,
* or else None if no provisional result was stored.
*/
def get(timeout: Long): Option[Either[T, Throwable]] = synchronized {
val start = System.currentTimeMillis
var current = start
while (!complete && start + timeout > current) {
try {
wait(timeout - (current - start))
} catch {
case exc: InterruptedException =>
Thread.currentThread().interrupt()
raise(exc)
}
current = System.currentTimeMillis
}
data
}
/** Final data set was stored
*/
def isComplete: Boolean = synchronized { complete }
/** Cancel action computing this response (Only the
* party that calls get on a response may cancel).
*/
def cancel(): Unit = synchronized { cancelled = true }
/** A cancel request for this response has been issued
*/
def isCancelled: Boolean = synchronized { cancelled }
def clear(): Unit = synchronized {
data = None
complete = false
cancelled = false
}
}
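// A minimal usage sketch of the polling protocol described in the class comment above; the
// background thread and the 10 ms poll interval are hypothetical and only illustrate the API.
object ResponseUsageSketch {
  def main(args: Array[String]): Unit = {
    val r = new Response[Int]
    new Thread(new Runnable {
      def run(): Unit = { Thread.sleep(50); r.set(42) } // simulated background computation
    }).start()
    while (!r.isComplete && !r.isCancelled) {
      r.get(10) match {
        case Some(Left(data)) => println(s"result: $data")
        case Some(Right(exc)) => exc.printStackTrace()
        case None => // no provisional result yet, keep polling
      }
    }
  }
}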
|
arguslab/Argus-SAF
|
jawa/src/main/scala/org/argus/jawa/core/compiler/interactive/Response.scala
|
Scala
|
apache-2.0
| 3,061 |
package com.jgdodson.rosalind
object Sign {
def main(args: Array[String]): Unit = {
val n = args(0).toInt
val count = numSignedPermutations(n)
val perms = Perm.permutations2((1 to n).toSet).flatMap(signed).map(_.mkString(" "))
println(count)
println(perms.mkString("\n"))
}
def numSignedPermutations(n: Int): Int = {
math.pow(2, n).toInt * (2 to n).product
}
def signed(perm: Vector[Int]): Set[Vector[Int]] = {
def prependAll(n: Int, ss: Set[Vector[Int]]): Set[Vector[Int]] = {
ss.map(item => n +: item)
}
if (perm.length == 1) Set(perm, perm.map(i => -i))
else {
prependAll(perm.head, signed(perm.tail)) ++ prependAll(-perm.head, signed(perm.tail))
}
}
}
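// A quick worked check of the formula above, as a sketch: for n = 2 the count is
// 2^2 * 2! = 8 signed permutations, and each unsigned permutation expands to 2^n signed ones.
object SignCheck {
  def main(args: Array[String]): Unit = {
    assert(Sign.numSignedPermutations(2) == 8)
    assert(Sign.signed(Vector(1, 2)).size == 4) // one unsigned permutation -> 2^2 sign choices
    println("Sign formula checks passed")
  }
}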
|
PilgrimShadow/Rosalind.scala
|
src/main/scala/com/jgdodson/rosalind/Sign.scala
|
Scala
|
mit
| 731 |
package katas.scala.water
import org.scalatest.Matchers
import org.junit.Test
/**
* User: dima
* Date: 10/11/2012
*/
class Water0 extends Matchers {
class Pouring(capacity: Vector[Int]) {
type State = Vector[Int]
val initialState = capacity map {x => 0}
val glasses = 0 until capacity.size
val moves =
(for (g <- glasses) yield Empty(g)) ++
(for (g <- glasses) yield Fill(g)) ++
(for (from <- glasses; to <- glasses if (from != to)) yield Pour(from, to))
val initialPath = new Path(Nil)
val pathSets = from(Set(initialPath), Set(initialState))
trait Move {
def change(state: State): State
}
case class Empty(glass: Int) extends Move {
def change(state: State) = state updated (glass, 0)
}
case class Fill(glass: Int) extends Move {
def change(state: State) = state updated (glass, capacity(glass))
}
case class Pour(from: Int, to: Int) extends Move {
def change(state: State) = {
val amount = state(from) min (capacity(to) - state(to))
state updated (from, state(from) - amount) updated (to, state(to) + amount)
}
}
case class Path(history: List[Move]) {
def endState = (history foldRight initialState) (_ change _)
def extend(move: Move) = new Path(move :: history)
override def toString = (history.reverse mkString ", ") + "-->" + endState
}
def from(paths: Set[Path], explored: Set[State]): Stream[Set[Path]] = {
if (paths.isEmpty) Stream.empty
else {
val more = for {
path <- paths
next <- moves map path.extend
if !(explored contains next.endState)
} yield next
paths #:: from(more, explored ++ (more map (_.endState)))
}
}
def solutions(target: Int): Stream[Path] = {
for {
pathSet <- pathSets
path <- pathSet
if (path.endState contains target)
} yield path
}
}
@Test def shouldFindAllPossibleMoves() {
val pouring = new Pouring(Vector(4, 7))
import pouring._
moves should equal(Seq(
Empty(0),
Empty(1),
Fill(0),
Fill(1),
Pour(0,1),
Pour(1,0)
))
}
@Test def shouldFindSolutionForTwoGlasses() {
val pouring = new Pouring(Vector(4, 7))
import pouring._
val solution = solutions(6).head
solution.endState should equal(Vector(4, 6))
solution should equal(Path(List(
Fill(1), Pour(1, 0), Empty(0), Pour(1, 0), Fill(1), Pour(1, 0)
).reverse))
}
@Test def shouldFindSolutionForThreeGlasses() {
val pouring = new Pouring(Vector(4, 9, 19))
import pouring._
val solution = solutions(17).head
solution.endState should equal(Vector(0, 0, 17))
solution should equal(Path(List(
Fill(0), Pour(0, 2), Fill(1), Fill(0), Pour(0, 2), Pour(1, 2)
).reverse))
}
}
|
dkandalov/katas
|
scala/src/katas/scala/water/Water0.scala
|
Scala
|
unlicense
| 2,666 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.mesos
import java.io.File
import java.util.{List => JList}
import java.util.Collections
import scala.collection.JavaConversions._
import scala.collection.mutable.{HashMap, HashSet}
import org.apache.mesos.{Scheduler => MScheduler}
import org.apache.mesos._
import org.apache.mesos.Protos.{TaskInfo => MesosTaskInfo, TaskState => MesosTaskState, _}
import org.apache.spark.{Logging, SparkContext, SparkEnv, SparkException, TaskState}
import org.apache.spark.scheduler.TaskSchedulerImpl
import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend
import org.apache.spark.util.{Utils, AkkaUtils}
/**
* A SchedulerBackend that runs tasks on Mesos, but uses "coarse-grained" tasks, where it holds
* onto each Mesos node for the duration of the Spark job instead of relinquishing cores whenever
* a task is done. It launches Spark tasks within the coarse-grained Mesos tasks using the
* CoarseGrainedSchedulerBackend mechanism. This class is useful for lower and more predictable
* latency.
*
* Unfortunately this has a bit of duplication from MesosSchedulerBackend, but it seems hard to
* remove this.
*/
private[spark] class CoarseMesosSchedulerBackend(
scheduler: TaskSchedulerImpl,
sc: SparkContext,
master: String)
extends CoarseGrainedSchedulerBackend(scheduler, sc.env.actorSystem)
with MScheduler
with Logging {
val MAX_SLAVE_FAILURES = 2 // Blacklist a slave after this many failures
// Lock used to wait for scheduler to be registered
var isRegistered = false
val registeredLock = new Object()
// Driver for talking to Mesos
var driver: SchedulerDriver = null
// Maximum number of cores to acquire (TODO: we'll need more flexible controls here)
val maxCores = conf.get("spark.cores.max", Int.MaxValue.toString).toInt
// Cores we have acquired with each Mesos task ID
val coresByTaskId = new HashMap[Int, Int]
var totalCoresAcquired = 0
val slaveIdsWithExecutors = new HashSet[String]
val taskIdToSlaveId = new HashMap[Int, String]
val failuresBySlaveId = new HashMap[String, Int] // How many times tasks on each slave failed
val extraCoresPerSlave = conf.getInt("spark.mesos.extra.cores", 0)
var nextMesosTaskId = 0
@volatile var appId: String = _
def newMesosTaskId(): Int = {
val id = nextMesosTaskId
nextMesosTaskId += 1
id
}
override def start() {
super.start()
synchronized {
new Thread("CoarseMesosSchedulerBackend driver") {
setDaemon(true)
override def run() {
val scheduler = CoarseMesosSchedulerBackend.this
val fwInfo = FrameworkInfo.newBuilder().setUser(sc.sparkUser).setName(sc.appName).build()
driver = new MesosSchedulerDriver(scheduler, fwInfo, master)
          try {
            val ret = driver.run()
            logInfo("driver.run() returned with code " + ret)
          } catch {
case e: Exception => logError("driver.run() failed", e)
}
}
}.start()
waitForRegister()
}
}
def createCommand(offer: Offer, numCores: Int): CommandInfo = {
val executorSparkHome = conf.getOption("spark.mesos.executor.home")
.orElse(sc.getSparkHome())
.getOrElse {
throw new SparkException("Executor Spark home `spark.mesos.executor.home` is not set!")
}
val environment = Environment.newBuilder()
val extraClassPath = conf.getOption("spark.executor.extraClassPath")
extraClassPath.foreach { cp =>
environment.addVariables(
Environment.Variable.newBuilder().setName("SPARK_CLASSPATH").setValue(cp).build())
}
val extraJavaOpts = conf.get("spark.executor.extraJavaOptions", "")
// Set the environment variable through a command prefix
// to append to the existing value of the variable
val prefixEnv = conf.getOption("spark.executor.extraLibraryPath").map { p =>
Utils.libraryPathEnvPrefix(Seq(p))
}.getOrElse("")
environment.addVariables(
Environment.Variable.newBuilder()
.setName("SPARK_EXECUTOR_OPTS")
.setValue(extraJavaOpts)
.build())
sc.executorEnvs.foreach { case (key, value) =>
environment.addVariables(Environment.Variable.newBuilder()
.setName(key)
.setValue(value)
.build())
}
val command = CommandInfo.newBuilder()
.setEnvironment(environment)
val driverUrl = AkkaUtils.address(
AkkaUtils.protocol(sc.env.actorSystem),
SparkEnv.driverActorSystemName,
conf.get("spark.driver.host"),
conf.get("spark.driver.port"),
CoarseGrainedSchedulerBackend.ACTOR_NAME)
val uri = conf.get("spark.executor.uri", null)
if (uri == null) {
val runScript = new File(executorSparkHome, "./bin/spark-class").getCanonicalPath
command.setValue(
"%s \"%s\" org.apache.spark.executor.CoarseGrainedExecutorBackend"
.format(prefixEnv, runScript) +
s" --driver-url $driverUrl" +
s" --executor-id ${offer.getSlaveId.getValue}" +
s" --hostname ${offer.getHostname}" +
s" --cores $numCores" +
s" --app-id $appId")
} else {
// Grab everything to the first '.'. We'll use that and '*' to
// glob the directory "correctly".
val basename = uri.split('/').last.split('.').head
command.setValue(
s"cd $basename*; $prefixEnv " +
"./bin/spark-class org.apache.spark.executor.CoarseGrainedExecutorBackend" +
s" --driver-url $driverUrl" +
s" --executor-id ${offer.getSlaveId.getValue}" +
s" --hostname ${offer.getHostname}" +
s" --cores $numCores" +
s" --app-id $appId")
command.addUris(CommandInfo.URI.newBuilder().setValue(uri))
}
command.build()
}
override def offerRescinded(d: SchedulerDriver, o: OfferID) {}
override def registered(d: SchedulerDriver, frameworkId: FrameworkID, masterInfo: MasterInfo) {
appId = frameworkId.getValue
logInfo("Registered as framework ID " + appId)
registeredLock.synchronized {
isRegistered = true
registeredLock.notifyAll()
}
}
def waitForRegister() {
registeredLock.synchronized {
while (!isRegistered) {
registeredLock.wait()
}
}
}
override def disconnected(d: SchedulerDriver) {}
override def reregistered(d: SchedulerDriver, masterInfo: MasterInfo) {}
/**
* Method called by Mesos to offer resources on slaves. We respond by launching an executor,
* unless we've already launched more than we wanted to.
*/
override def resourceOffers(d: SchedulerDriver, offers: JList[Offer]) {
synchronized {
val filters = Filters.newBuilder().setRefuseSeconds(-1).build()
for (offer <- offers) {
val slaveId = offer.getSlaveId.toString
val mem = getResource(offer.getResourcesList, "mem")
val cpus = getResource(offer.getResourcesList, "cpus").toInt
if (totalCoresAcquired < maxCores &&
mem >= MemoryUtils.calculateTotalMemory(sc) &&
cpus >= 1 &&
failuresBySlaveId.getOrElse(slaveId, 0) < MAX_SLAVE_FAILURES &&
!slaveIdsWithExecutors.contains(slaveId)) {
// Launch an executor on the slave
val cpusToUse = math.min(cpus, maxCores - totalCoresAcquired)
totalCoresAcquired += cpusToUse
val taskId = newMesosTaskId()
taskIdToSlaveId(taskId) = slaveId
slaveIdsWithExecutors += slaveId
coresByTaskId(taskId) = cpusToUse
val task = MesosTaskInfo.newBuilder()
.setTaskId(TaskID.newBuilder().setValue(taskId.toString).build())
.setSlaveId(offer.getSlaveId)
.setCommand(createCommand(offer, cpusToUse + extraCoresPerSlave))
.setName("Task " + taskId)
.addResources(createResource("cpus", cpusToUse))
.addResources(createResource("mem",
MemoryUtils.calculateTotalMemory(sc)))
.build()
d.launchTasks(
Collections.singleton(offer.getId), Collections.singletonList(task), filters)
} else {
// Filter it out
d.launchTasks(
Collections.singleton(offer.getId), Collections.emptyList[MesosTaskInfo](), filters)
}
}
}
}
/** Helper function to pull out a resource from a Mesos Resources protobuf */
private def getResource(res: JList[Resource], name: String): Double = {
for (r <- res if r.getName == name) {
return r.getScalar.getValue
}
0
}
/** Build a Mesos resource protobuf object */
private def createResource(resourceName: String, quantity: Double): Protos.Resource = {
Resource.newBuilder()
.setName(resourceName)
.setType(Value.Type.SCALAR)
.setScalar(Value.Scalar.newBuilder().setValue(quantity).build())
.build()
}
override def statusUpdate(d: SchedulerDriver, status: TaskStatus) {
val taskId = status.getTaskId.getValue.toInt
val state = status.getState
logInfo("Mesos task " + taskId + " is now " + state)
synchronized {
if (TaskState.isFinished(TaskState.fromMesos(state))) {
val slaveId = taskIdToSlaveId(taskId)
slaveIdsWithExecutors -= slaveId
taskIdToSlaveId -= taskId
// Remove the cores we have remembered for this task, if it's in the hashmap
for (cores <- coresByTaskId.get(taskId)) {
totalCoresAcquired -= cores
coresByTaskId -= taskId
}
// If it was a failure, mark the slave as failed for blacklisting purposes
if (TaskState.isFailed(TaskState.fromMesos(state))) {
failuresBySlaveId(slaveId) = failuresBySlaveId.getOrElse(slaveId, 0) + 1
if (failuresBySlaveId(slaveId) >= MAX_SLAVE_FAILURES) {
logInfo("Blacklisting Mesos slave " + slaveId + " due to too many failures; " +
"is Spark installed on it?")
}
}
driver.reviveOffers() // In case we'd rejected everything before but have now lost a node
}
}
}
override def error(d: SchedulerDriver, message: String) {
logError("Mesos error: " + message)
scheduler.error(message)
}
override def stop() {
super.stop()
if (driver != null) {
driver.stop()
}
}
override def frameworkMessage(d: SchedulerDriver, e: ExecutorID, s: SlaveID, b: Array[Byte]) {}
override def slaveLost(d: SchedulerDriver, slaveId: SlaveID) {
logInfo("Mesos slave lost: " + slaveId.getValue)
synchronized {
if (slaveIdsWithExecutors.contains(slaveId.getValue)) {
// Note that the slave ID corresponds to the executor ID on that slave
slaveIdsWithExecutors -= slaveId.getValue
removeExecutor(slaveId.getValue, "Mesos slave lost")
}
}
}
override def executorLost(d: SchedulerDriver, e: ExecutorID, s: SlaveID, status: Int) {
logInfo("Executor lost: %s, marking slave %s as lost".format(e.getValue, s.getValue))
slaveLost(d, s)
}
override def applicationId(): String =
Option(appId).getOrElse {
logWarning("Application ID is not initialized yet.")
super.applicationId
}
}
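// A minimal configuration sketch showing how this backend was typically selected in Spark 1.x;
// the master URL and core counts are hypothetical, while `spark.cores.max` and
// `spark.mesos.coarse` correspond to settings this class era relied on.
import org.apache.spark.SparkConf
object CoarseMesosUsageSketch {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("mesos://zk://zk1:2181/mesos") // hypothetical Mesos master
      .setAppName("coarse-mesos-sketch")
      .set("spark.mesos.coarse", "true")        // pick the coarse-grained backend
      .set("spark.cores.max", "8")              // cap on totalCoresAcquired above
    val sc = new SparkContext(conf)
    try println(sc.parallelize(1 to 100).count())
    finally sc.stop()
  }
}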
|
Dax1n/spark-core
|
core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/CoarseMesosSchedulerBackend.scala
|
Scala
|
apache-2.0
| 11,998 |
/**********************************************************************************************************************
* This file is part of Scrupal, a Scalable Reactive Web Application Framework for Content Management *
* *
* Copyright (c) 2015, Reactific Software LLC. All Rights Reserved. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance *
* with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed *
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for *
* the specific language governing permissions and limitations under the License. *
**********************************************************************************************************************/
package scrupal.storage.api
import java.io.{ByteArrayInputStream, InputStream, OutputStream}
import akka.util.{ByteString, ByteStringBuilder}
import com.esotericsoftware.kryo.Kryo
import com.esotericsoftware.kryo.Serializer
import com.esotericsoftware.kryo.serializers.CompatibleFieldSerializer
import com.esotericsoftware.kryo.pool._
import com.twitter.chill.{KryoBase, Input, Output, ScalaKryoInstantiator}
import scrupal.utils.{Registry, Registrable}
/** Encode/Decode Of Storable with Kryo/Chill Serialization
*
* Every storable class type needs to derive from Storable and have a Codec provided for it. The default Serializer,
* CompatibleFieldSerializer supports backwards and forwards compatibility through code changes. That is appropriate
* for most uses even though it is the most expensive option. Compatibility can be sacrificed for speed by using
* one of the other Kryo Serialization classes or a hand crafted one.
*
* Each Storable class should make an implicit Codec object available in its companion like this:
* {{{
* class Foo extends Storable { ... }
* object Foo {
* implicit object FooCodec extends Codec[Foo] {
* val regNum = 79
* val clazz = classOf[Foo]
* override def serializer(kryo: Kryo) : Serializer[T] = { ... } // optionally
* }
* }
* }}}
*/
trait Codec[T <: Storable] extends Registrable[Codec[_]] {
def registry : CodecRegistry
def regNum : Int
def clazz : Class[T]
def serializer(kryo: Kryo) : Serializer[T] = new CompatibleFieldSerializer[T](kryo, clazz)
final def encode(obj : T) : Array[Byte] = {
val bldr : ByteStringBuilder = ByteString.newBuilder
encode(obj, bldr)
bldr.result().toArray
}
final def encode(obj : T, bldr: ByteStringBuilder) : Unit = {
registry.withKryo { kryo: Kryo ⇒
val output = new Output(bldr.asOutputStream)
try {
kryo.writeClassAndObject(output, obj)
} finally{
output.close()
}
}
}
final def encode(obj : T, out: OutputStream) : Unit = {
registry.withKryo { kryo: Kryo ⇒
val output = new Output(out)
try {
kryo.writeClassAndObject(output, obj)
} finally {
output.close()
}
}
}
final def decode(bytes: Array[Byte]) : T = {
registry.withKryo { kryo: Kryo ⇒
val bais = new ByteArrayInputStream(bytes)
val input = new Input(bais)
try {
kryo.readClassAndObject(input).asInstanceOf[T]
} finally {
input.close()
}
}
}
final def decode(in : InputStream) : T = {
registry.withKryo { kryo : Kryo ⇒
val input = new Input(in)
try {
kryo.readClassAndObject(input).asInstanceOf[T]
} finally {
input.close()
}
}
}
}
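// A minimal round-trip sketch using only the trait above; the type `T` and its implicit
// Codec are assumed to be supplied by the caller, following the companion-object pattern
// documented in the scaladoc.
object CodecUsageSketch {
  def roundTrip[T <: Storable](value: T)(implicit codec: Codec[T]): T =
    codec.decode(codec.encode(value)) // Kryo-serialize to bytes, then read the object back
}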
class CodecRegistry extends Registry[Codec[_]] {
def registrantsName = "codec"
def registryName = "Codecs"
val MinimumRegistrationNumber = 128
lazy val instantiator = new ScalaKryoInstantiator {
val storables : Seq[Codec[_]] = {
val numbers = _registry.map { case (k, v) ⇒ v.regNum }.toSeq
val unique_numbers = numbers.distinct
if (numbers.size != unique_numbers.size) {
val duplicates = numbers.diff(unique_numbers)
val duplicate_codecs = _registry.filter { case (sym, codec) ⇒
duplicates.contains(codec.regNum)
} groupBy { case (sym, codec) ⇒
codec.regNum
}
val messages = duplicate_codecs.map { case (num, map) ⇒
val names = map.map { case (sym, cod) ⇒ s"${cod.id.name}:${cod.clazz.getSimpleName}" }
s"#$num: ${names.mkString(", ")}"
}
toss(s"Duplicate Storable Registration Numbers Found For: ${messages.mkString("; ")}")
}
val kryo = super.newKryo()
val conflicts = for ( (sym,codec) ← _registry ;
reg = kryo.getRegistration(codec.regNum) if reg != null
) yield {
s"#${codec.regNum}: ${codec.clazz.getSimpleName} & ${reg.getType.getSimpleName}"
}
if (conflicts.nonEmpty) {
toss(s"Codec registrations conflict with standard serializers: ${conflicts.mkString(",\\n")}.")
}
val tooSmalls = for ( (sym,codec) ← _registry if codec.regNum < MinimumRegistrationNumber) yield {
s"#${codec.regNum}: ${codec.clazz.getSimpleName}"
}
if (tooSmalls.nonEmpty) {
toss(s"Codec registration numbers too small: ${tooSmalls.mkString(",\\n")}.")
}
_registry.map { case (sym,codec) ⇒ codec }.toSeq
}
override def newKryo(): KryoBase = {
val kryo = super.newKryo()
kryo.setAsmEnabled(true)
kryo.setReferences(true)
kryo.setRegistrationRequired(true)
kryo.addDefaultSerializer(classOf[Storable], new CompatibleFieldSerializer[Storable](kryo, classOf[Storable]))
kryo.setDefaultSerializer(classOf[CompatibleFieldSerializer[_]])
var min = Integer.MAX_VALUE
var max = Integer.MIN_VALUE
for (s ← storables) {
kryo.register(s.clazz, s.serializer(kryo), s.regNum)
if (s.regNum < min) min = s.regNum
if (s.regNum > max) max = s.regNum
}
kryo.register(classOf[scrupal.storage.api.Storable.StorablePimps[_]], kryo.getNextRegistrationId)
kryo.register(classOf[scrupal.storage.api.Storable#LongQueryable], kryo.getNextRegistrationId)
val after = kryo.getNextRegistrationId
log.info(s"Registered ${storables.size} Storable serializers with minID=$min and maxID=$max. Next ID=${after}")
kryo
}
}
private lazy val factory = new KryoFactory {
def create: Kryo = {
instantiator.newKryo()
}
}
// Build pool with SoftReferences enabled (optional)
private val pool: KryoPool = new KryoPool.Builder(factory).softReferences().build()
def withKryo[T](f : (Kryo) ⇒ T) : T = {
val kryo: Kryo = pool.borrow()
try {
f(kryo)
} finally {
pool.release(kryo)
}
}
}
|
scrupal/scrupal
|
scrupal-storage/src/main/scala/scrupal/storage/api/Codec.scala
|
Scala
|
apache-2.0
| 7,649 |
package doobie
import scalaz.{ Monad, Catchable, Unapply, Leibniz, Free, Functor }
import scalaz.stream.Process
/** Module of aliases for commonly-used types and syntax; use as `import doobie.imports._` */
object imports {
/**
* Alias for `doobie.free.connection`.
* @group Free Module Aliases
*/
val FC = doobie.free.connection
/**
* Alias for `doobie.free.statement`.
* @group Free Module Aliases
*/
val FS = doobie.free.statement
/**
* Alias for `doobie.free.preparedstatement`.
* @group Free Module Aliases
*/
val FPS = doobie.free.preparedstatement
/**
* Alias for `doobie.free.resultset`.
* @group Free Module Aliases
*/
val FRS = doobie.free.resultset
/**
* Alias for `doobie.hi.connection`.
* @group Hi Module Aliases
*/
val HC = doobie.hi.connection
/**
* Alias for `doobie.hi.drivermanager`.
* @group Hi Module Aliases
*/
val HDM = doobie.hi.drivermanager
/**
* Alias for `doobie.hi.statement`.
* @group Hi Module Aliases
*/
val HS = doobie.hi.statement
/**
* Alias for `doobie.hi.preparedstatement`.
* @group Hi Module Aliases
*/
val HPS = doobie.hi.preparedstatement
/**
* Alias for `doobie.hi.resultset`.
* @group Hi Module Aliases
*/
val HRS = doobie.hi.resultset
/** @group Type Aliases */ type ConnectionIO[A] = doobie.free.connection.ConnectionIO[A]
/** @group Type Aliases */ type StatementIO[A] = doobie.free.statement.StatementIO[A]
/** @group Type Aliases */ type PreparedStatementIO[A] = doobie.free.preparedstatement.PreparedStatementIO[A]
/** @group Type Aliases */ type ResultSetIO[A] = doobie.free.resultset.ResultSetIO[A]
/** @group Syntax */
implicit def toDoobieCatchableOps[M[_]: Monad: Catchable, A](ma: M[A]) =
new doobie.syntax.catchable.DoobieCatchableOps(ma)
/** @group Syntax */
implicit def toDoobieCatchSqlOps[M[_]: Monad: Catchable, A](ma: M[A]) =
new doobie.syntax.catchsql.DoobieCatchSqlOps(ma)
/** @group Syntax */
implicit def toProcessOps[F[_]: Monad: Catchable: Capture, A](fa: Process[F, A]) =
new doobie.syntax.process.ProcessOps(fa)
/** @group Syntax */
implicit def toSqlInterpolator(sc: StringContext) =
new doobie.syntax.string.SqlInterpolator(sc)
/** @group Syntax */
implicit def toMoreConnectionIOOps[A](ma: ConnectionIO[A]) =
new doobie.syntax.connectionio.MoreConnectionIOOps(ma)
/** @group Type Aliases */ type Meta[A] = doobie.util.meta.Meta[A]
/** @group Companion Aliases */ val Meta = doobie.util.meta.Meta
/** @group Type Aliases */ type Atom[A] = doobie.util.atom.Atom[A]
/** @group Companion Aliases */ val Atom = doobie.util.atom.Atom
/** @group Type Aliases */ type Capture[M[_]] = doobie.util.capture.Capture[M]
/** @group Companion Aliases */ val Capture = doobie.util.capture.Capture
/** @group Type Aliases */ type Composite[A] = doobie.util.composite.Composite[A]
/** @group Companion Aliases */ val Composite = doobie.util.composite.Composite
/** @group Type Aliases */ type Query[A,B] = doobie.util.query.Query[A,B]
/** @group Companion Aliases */ val Query = doobie.util.query.Query
/** @group Type Aliases */ type Update[A] = doobie.util.update.Update[A]
/** @group Companion Aliases */ val Update = doobie.util.update.Update
/** @group Type Aliases */ type Query0[A] = doobie.util.query.Query0[A]
/** @group Companion Aliases */ val Query0 = doobie.util.query.Query0
/** @group Type Aliases */ type Update0 = doobie.util.update.Update0
/** @group Companion Aliases */ val Update0 = doobie.util.update.Update0
/** @group Type Aliases */ type SqlState = doobie.enum.sqlstate.SqlState
/** @group Companion Aliases */ val SqlState = doobie.enum.sqlstate.SqlState
/** @group Type Aliases */ type Transactor[M[_]] = doobie.util.transactor.Transactor[M]
/** @group Companion Aliases */ val DriverManagerTransactor = doobie.util.transactor.DriverManagerTransactor
/** @group Companion Aliases */ val DataSourceTransactor = doobie.util.transactor.DataSourceTransactor
/** @group Typeclass Instances */
implicit val NameCatchable = doobie.util.name.NameCatchable
/** @group Typeclass Instances */
implicit val NameCapture = doobie.util.name.NameCapture
/**
* Free monad derivation with correct shape to derive an instance for `Free[Coyoneda[F, ?], ?]`.
* @group Hacks
*/
implicit def freeMonadC[FT[_[_], _], F[_]](implicit ev: Functor[FT[F, ?]]) =
Free.freeMonad[FT[F,?]]
/**
* Unapply with correct shape to unpack `Monad[Free[Coyoneda[F, ?], ?]]`.
* @group Hacks
*/
implicit def unapplyMMFA[TC[_[_]], M0[_[_], _], M1[_[_], _], F0[_], A0](implicit TC0: TC[M0[M1[F0,?], ?]]):
Unapply[TC, M0[M1[F0,?], A0]] {
type M[X] = M0[M1[F0,?], X]
type A = A0
} =
new Unapply[TC, M0[M1[F0,?], A0]] {
type M[X] = M0[M1[F0,?], X]
type A = A0
def TC = TC0
def leibniz = Leibniz.refl
}
}
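// A hedged usage sketch of the aliases above; the H2 driver coordinates are hypothetical, and
// exact signatures (DriverManagerTransactor.apply, .transact, Task#run) may differ slightly
// between doobie/scalaz releases.
object ImportsUsageSketch {
  import doobie.imports._
  import scalaz.concurrent.Task
  val xa = DriverManagerTransactor[Task]("org.h2.Driver", "jdbc:h2:mem:sketch", "sa", "")
  val program: ConnectionIO[Int] = sql"select 42".query[Int].unique
  def main(args: Array[String]): Unit =
    println(program.transact(xa).run) // interprets the ConnectionIO program and prints 42
}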
|
jamescway/doobie
|
core/src/main/scala/doobie/imports.scala
|
Scala
|
mit
| 5,104 |
package lambdacalculus.machine.CallByName
import lambdacalculus.machine._
case class CBNConfiguration(stack: List[MachineValue], env:List[MachineValue], code:List[Instruction]) extends Configuration {
override def toString:String = {
val sb = new StringBuilder()
sb ++= "stack: " ++ stack.mkString("(", ", ", ")")
sb ++= "\\nenv : " ++ env.mkString("(", ", ", ")")
sb ++= "\\ncode : " ++ code.mkString("(", ", ", ")")
sb.toString()
}
override def isTransformable: Boolean = !code.isEmpty
}
|
cn-uofbasel/nfn-scala
|
lambdacalc/src/main/scala/lambdacalculus/machine/CallByName/CBNConfiguration.scala
|
Scala
|
isc
| 521 |
/*
* Copyright 2013 Julian Peeters
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package artisanal.pickle.maker
package tags
import scala.reflect.internal.pickling._
case class NoneSym() {
var position = 0
def write(currentPosition: Position, myPickleBuffer: PickleBuffer) = {
position = currentPosition.current
//tag
myPickleBuffer.writeByte(3)
//len
myPickleBuffer.writeNat(0)
//data
//}
currentPosition.current += 1
}
}
|
julianpeeters/artisanal-pickle-maker
|
src/main/scala/tags/NONEsym.scala
|
Scala
|
apache-2.0
| 977 |
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.data
import org.apache.accumulo.core.client.{BatchWriterConfig, Connector}
import org.apache.accumulo.core.data.{Mutation, Range, Value}
import org.apache.accumulo.core.security.ColumnVisibility
import org.apache.hadoop.io.Text
import org.locationtech.geomesa.core.data.AccumuloBackedMetadata._
import org.locationtech.geomesa.core.security.AuthorizationsProvider
import org.locationtech.geomesa.core.util.SelfClosingIterator
import scala.collection.JavaConversions._
import scala.collection.mutable
/**
 * GeoMesa Metadata/Catalog abstraction that stores key/value String pairs
 * on a per-feature-name basis
*/
trait GeoMesaMetadata {
def delete(featureName: String, numThreads: Int)
def insert(featureName: String, key: String, value: String)
def insert(featureName: String, kvPairs: Map[String, String])
def insert(featureName: String, key: String, value: String, vis: String)
def read(featureName: String, key: String): Option[String]
def readRequired(featureName: String, key: String): String
def readRequiredNoCache(featureName: String, key: String): Option[String]
def expireCache(featureName: String)
def getFeatureTypes: Array[String]
}
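// A minimal usage sketch against the trait above; the feature name, key and schema string are
// hypothetical, and any implementation (such as AccumuloBackedMetadata below) can be passed in.
object GeoMesaMetadataUsageSketch {
  def storeAndRead(metadata: GeoMesaMetadata): Option[String] = {
    metadata.insert("myFeature", "schema", "name:String,*geom:Point:srid=4326")
    metadata.read("myFeature", "schema") // may be served from an implementation-level cache
  }
}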
class AccumuloBackedMetadata(connector: Connector,
catalogTable: String,
writeVisibilities: String,
authorizationsProvider: AuthorizationsProvider) extends GeoMesaMetadata {
// warning: only access this map in a synchronized fashion
private val metaDataCache = new mutable.HashMap[(String, String), Option[String]]()
// TODO memory should be configurable
private val metadataBWConfig =
new BatchWriterConfig().setMaxMemory(10000L).setMaxWriteThreads(1)
/**
* Handles creating a mutation for writing metadata
*
* @param featureName
* @return
*/
private def getMetadataMutation(featureName: String) = new Mutation(getMetadataRowKey(featureName))
/**
* Handles encoding metadata into a mutation.
*
* @param featureName
* @param mutation
* @param key
* @param value
*/
private def putMetadata(featureName: String,
mutation: Mutation,
key: String,
value: String) {
mutation.put(new Text(key), EMPTY_COLQ, new Value(value.getBytes))
// also pre-fetch into the cache
if (!value.isEmpty) {
metaDataCache.synchronized { metaDataCache.put((featureName, key), Some(value)) }
}
}
/**
* Handles writing mutations
*
* @param mutations
*/
private def writeMutations(mutations: Mutation*): Unit = {
val writer = connector.createBatchWriter(catalogTable, metadataBWConfig)
for (mutation <- mutations) {
writer.addMutation(mutation)
}
writer.flush()
writer.close()
}
/**
* Handles deleting metadata from the catalog by using the Range obtained from the METADATA_TAG and featureName
* and setting that as the Range to be handled and deleted by Accumulo's BatchDeleter
*
* @param featureName the name of the table to query and delete from
* @param numThreads the number of concurrent threads to spawn for querying
*/
override def delete(featureName: String, numThreads: Int): Unit = {
val range = new Range(getMetadataRowKey(featureName))
val deleter = connector.createBatchDeleter(catalogTable,
authorizationsProvider.getAuthorizations,
numThreads,
metadataBWConfig)
deleter.setRanges(List(range))
deleter.delete()
deleter.close()
}
/**
* Creates the row id for a metadata entry
*
* @param featureName
* @return
*/
private def getMetadataRowKey(featureName: String) = new Text(METADATA_TAG + "_" + featureName)
/**
* Reads metadata from cache or scans if not available
*
* @param featureName
* @param key
* @return
*/
override def read(featureName: String, key: String): Option[String] =
metaDataCache.synchronized {
metaDataCache.getOrElseUpdate((featureName, key), readRequiredNoCache(featureName, key))
}
override def readRequired(featureName: String, key: String): String =
read(featureName, key)
.getOrElse(throw new RuntimeException(s"Unable to find required metadata property for key $key"))
/**
* Gets metadata by scanning the table, without the local cache
*
* Read metadata using scheme: ~METADATA_featureName metadataFieldName: insertionTimestamp metadataValue
*
* @param featureName
* @param key
* @return
*/
override def readRequiredNoCache(featureName: String, key: String): Option[String] = {
val scanner = createCatalogScanner
scanner.setRange(new Range(getMetadataRowKey(featureName)))
scanner.fetchColumn(new Text(key), EMPTY_COLQ)
SelfClosingIterator(scanner).map(_.getValue.toString).toList.headOption
}
/**
* Create an Accumulo Scanner to the Catalog table to query Metadata for this store
*/
private def createCatalogScanner = connector.createScanner(catalogTable, authorizationsProvider.getAuthorizations)
override def expireCache(featureName: String) =
metaDataCache.synchronized {
metaDataCache.keys.filter { case (fn, _) => fn == featureName}.foreach(metaDataCache.remove)
}
override def insert(featureName: String, key: String, value: String) =
insert(featureName, Map(key -> value))
override def insert(featureName: String, kvPairs: Map[String, String]) = {
val mutation = getMetadataMutation(featureName)
kvPairs.foreach { case (k,v) =>
putMetadata(featureName, mutation, k, v)
}
writeMutations(mutation)
}
override def insert(featureName: String, key: String, value: String, vis: String) = {
val mutation = getMetadataMutation(featureName)
mutation.put(new Text(key), EMPTY_COLQ, new ColumnVisibility(vis), new Value(vis.getBytes))
writeMutations(mutation)
}
/**
* Scans metadata rows and pulls out the different feature types in the table
*
* @return
*/
override def getFeatureTypes: Array[String] = {
val scanner = createCatalogScanner
scanner.setRange(new Range(METADATA_TAG, METADATA_TAG_END))
// restrict to just schema cf so we only get 1 hit per feature
scanner.fetchColumnFamily(new Text(SCHEMA_KEY))
val resultItr = new Iterator[String] {
val src = scanner.iterator()
def hasNext = {
val next = src.hasNext
if (!next) {
scanner.close()
}
next
}
def next() = src.next().getKey.getRow.toString
}
resultItr.toArray.map(getFeatureNameFromMetadataRowKey)
}
/**
* Reads the feature name from a given metadata row key
*
* @param rowKey
* @return
*/
private def getFeatureNameFromMetadataRowKey(rowKey: String): String = {
val MetadataRowKeyRegex(featureName) = rowKey
featureName
}
}
object AccumuloBackedMetadata {
val MetadataRowKeyRegex = (METADATA_TAG + """_(.*)""").r
}
|
kevinwheeler/geomesa
|
geomesa-core/src/main/scala/org/locationtech/geomesa/core/data/GeoMesaMetadata.scala
|
Scala
|
apache-2.0
| 7,757 |
package org.oc.ld32.entity.ai
import org.lengine.maths.Vec2f
import org.oc.ld32.entity.EntityEnemy
import org.oc.ld32.level.BaguetteLevel
class AIWander(priority: Int, entity: EntityEnemy) extends AITask(priority, entity) {
var angle = 0f
var countdown = 0f
var direction: Vec2f = null
override def shouldContinue: Boolean = {
entity.target == null
}
override def canExecute: Boolean = {
entity.target == null
}
override def perform(delta: Float): Unit = {
countdown -= delta
if(countdown < 0) {
reset
} else {
val lvl = entity.level.asInstanceOf[BaguetteLevel]
if(!lvl.canGoTo(entity.boundingBox, entity.getPos.x + direction.x, entity.getPos.y)) {
direction.x = -direction.x
}
if(!lvl.canGoTo(entity.boundingBox, entity.getPos.x, entity.getPos.y + direction.y)) {
direction.y = -direction.y
}
entity.setAngle((Math.PI+Math.atan2(direction.y, direction.x)).toFloat)
if(lvl.canGoTo(entity.boundingBox, entity.getPos.x + direction.x, entity.getPos.y + direction.y)) {
entity.setPos(new Vec2f(entity.getPos.x + direction.x, entity.getPos.y + direction.y))
} else {
reset
}
}
}
override def reset: Unit = {
angle = (Math.random * 2f * Math.PI).toFloat
entity.setAngle(angle)
direction = new Vec2f(Math.cos(angle).toFloat, Math.sin(angle).toFloat)
    if(Math.random < 0.25) // Has a 1/4 probability of creating a pause
      direction.set(0,0)
    val max = 5
    val min = 1
countdown = (Math.random * (max-min) + min).toFloat
}
}
|
OurCraft/LD32
|
src/main/scala/org/oc/ld32/entity/ai/AIWander.scala
|
Scala
|
apache-2.0
| 1,603 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan}
import org.apache.spark.sql.catalyst.rules.RuleExecutor
class PropagateEmptyRelationSuite extends PlanTest {
object Optimize extends RuleExecutor[LogicalPlan] {
val batches =
Batch("PropagateEmptyRelation", Once,
CombineUnions,
ReplaceDistinctWithAggregate,
ReplaceExceptWithAntiJoin,
ReplaceIntersectWithSemiJoin,
PushDownPredicate,
PruneFilters,
PropagateEmptyRelation) :: Nil
}
object OptimizeWithoutPropagateEmptyRelation extends RuleExecutor[LogicalPlan] {
val batches =
Batch("OptimizeWithoutPropagateEmptyRelation", Once,
CombineUnions,
ReplaceDistinctWithAggregate,
ReplaceExceptWithAntiJoin,
ReplaceIntersectWithSemiJoin,
PushDownPredicate,
PruneFilters) :: Nil
}
val testRelation1 = LocalRelation.fromExternalRows(Seq('a.int), data = Seq(Row(1)))
val testRelation2 = LocalRelation.fromExternalRows(Seq('b.int), data = Seq(Row(1)))
test("propagate empty relation through Union") {
val query = testRelation1
.where(false)
.union(testRelation2.where(false))
val optimized = Optimize.execute(query.analyze)
val correctAnswer = LocalRelation('a.int)
comparePlans(optimized, correctAnswer)
}
test("propagate empty relation through Join") {
// Testcases are tuples of (left predicate, right predicate, joinType, correct answer)
// Note that `None` is used to compare with OptimizeWithoutPropagateEmptyRelation.
val testcases = Seq(
(true, true, Inner, None),
(true, true, Cross, None),
(true, true, LeftOuter, None),
(true, true, RightOuter, None),
(true, true, FullOuter, None),
(true, true, LeftAnti, None),
(true, true, LeftSemi, None),
(true, false, Inner, Some(LocalRelation('a.int, 'b.int))),
(true, false, Cross, Some(LocalRelation('a.int, 'b.int))),
(true, false, LeftOuter, None),
(true, false, RightOuter, Some(LocalRelation('a.int, 'b.int))),
(true, false, FullOuter, None),
(true, false, LeftAnti, None),
(true, false, LeftSemi, None),
(false, true, Inner, Some(LocalRelation('a.int, 'b.int))),
(false, true, Cross, Some(LocalRelation('a.int, 'b.int))),
(false, true, LeftOuter, Some(LocalRelation('a.int, 'b.int))),
(false, true, RightOuter, None),
(false, true, FullOuter, None),
(false, true, LeftAnti, Some(LocalRelation('a.int))),
(false, true, LeftSemi, Some(LocalRelation('a.int))),
(false, false, Inner, Some(LocalRelation('a.int, 'b.int))),
(false, false, Cross, Some(LocalRelation('a.int, 'b.int))),
(false, false, LeftOuter, Some(LocalRelation('a.int, 'b.int))),
(false, false, RightOuter, Some(LocalRelation('a.int, 'b.int))),
(false, false, FullOuter, Some(LocalRelation('a.int, 'b.int))),
(false, false, LeftAnti, Some(LocalRelation('a.int))),
(false, false, LeftSemi, Some(LocalRelation('a.int)))
)
testcases.foreach { case (left, right, jt, answer) =>
val query = testRelation1
.where(left)
.join(testRelation2.where(right), joinType = jt, condition = Some('a.attr == 'b.attr))
val optimized = Optimize.execute(query.analyze)
val correctAnswer =
answer.getOrElse(OptimizeWithoutPropagateEmptyRelation.execute(query.analyze))
comparePlans(optimized, correctAnswer)
}
}
test("propagate empty relation through UnaryNode") {
val query = testRelation1
.where(false)
.select('a)
.groupBy('a)('a)
.where('a > 1)
.orderBy('a.asc)
val optimized = Optimize.execute(query.analyze)
val correctAnswer = LocalRelation('a.int)
comparePlans(optimized, correctAnswer)
}
test("don't propagate non-empty local relation") {
val query = testRelation1
.where(true)
.groupBy('a)('a)
.where('a > 1)
.orderBy('a.asc)
.select('a)
val optimized = Optimize.execute(query.analyze)
val correctAnswer = testRelation1
.where('a > 1)
.groupBy('a)('a)
.orderBy('a.asc)
.select('a)
comparePlans(optimized, correctAnswer.analyze)
}
test("propagate empty relation through Aggregate without aggregate function") {
val query = testRelation1
.where(false)
.groupBy('a)('a, ('a + 1).as('x))
val optimized = Optimize.execute(query.analyze)
val correctAnswer = LocalRelation('a.int, 'x.int).analyze
comparePlans(optimized, correctAnswer)
}
test("don't propagate empty relation through Aggregate with aggregate function") {
val query = testRelation1
.where(false)
.groupBy('a)(count('a))
val optimized = Optimize.execute(query.analyze)
val correctAnswer = LocalRelation('a.int).groupBy('a)(count('a)).analyze
comparePlans(optimized, correctAnswer)
}
}
|
ZxlAaron/mypros
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/PropagateEmptyRelationSuite.scala
|
Scala
|
apache-2.0
| 5,974 |
/* sbt -- Simple Build Tool
* Copyright 2009, 2010 Mark Harrah
*/
package xsbt.boot
import Pre._
import java.io.{ File, FileInputStream, FileOutputStream }
import java.util.{ Locale, Properties }
import scala.collection.immutable.List
object Initialize {
lazy val selectCreate = (_: AppProperty).create
lazy val selectQuick = (_: AppProperty).quick
lazy val selectFill = (_: AppProperty).fill
def create(file: File, promptCreate: String, enableQuick: Boolean, spec: List[AppProperty]) {
readLine(promptCreate + " (y/N" + (if (enableQuick) "/s" else "") + ") ") match {
case None => declined("")
case Some(line) =>
line.toLowerCase(Locale.ENGLISH) match {
case "y" | "yes" => process(file, spec, selectCreate)
case "s" => process(file, spec, selectQuick)
case "n" | "no" | "" => declined("")
case x =>
System.out.println(" '" + x + "' not understood.")
create(file, promptCreate, enableQuick, spec)
}
}
}
def fill(file: File, spec: List[AppProperty]): Unit = process(file, spec, selectFill)
def process(file: File, appProperties: List[AppProperty], select: AppProperty => Option[PropertyInit]) {
val properties = readProperties(file)
val uninitialized =
for (property <- appProperties; init <- select(property) if properties.getProperty(property.name) == null) yield initialize(properties, property.name, init)
if (!uninitialized.isEmpty) writeProperties(properties, file, "")
}
def initialize(properties: Properties, name: String, init: PropertyInit) {
init match {
case set: SetProperty => properties.setProperty(name, set.value)
case prompt: PromptProperty =>
def noValue = declined("No value provided for " + prompt.label)
readLine(prompt.label + prompt.default.toList.map(" [" + _ + "]").mkString + ": ") match {
case None => noValue
case Some(line) =>
val value = if (isEmpty(line)) orElse(prompt.default, noValue) else line
properties.setProperty(name, value)
}
}
}
}
|
xeno-by/old-scalameta-sbt
|
launch/src/main/scala/xsbt/boot/Create.scala
|
Scala
|
bsd-3-clause
| 2,122 |
/* NSC -- new Scala compiler
* Copyright 2007-2013 LAMP/EPFL
* @author Manohar Jonnalagedda
*/
package scala.tools.nsc
package doc
package base
package comment
import scala.collection._
/** A body of text. A comment has a single body, which is composed of
* at least one block. Inside every body is exactly one summary (see
* [[scala.tools.nsc.doc.model.comment.Summary]]). */
final case class Body(blocks: Seq[Block]) {
/** The summary text of the comment body. */
lazy val summary: Option[Inline] = {
def summaryInBlock(block: Block): Seq[Inline] = block match {
case Title(text, _) => summaryInInline(text)
case Paragraph(text) => summaryInInline(text)
case UnorderedList(items) => items flatMap summaryInBlock
case OrderedList(items, _) => items flatMap summaryInBlock
case DefinitionList(items) => items.values.toSeq flatMap summaryInBlock
case _ => Nil
}
def summaryInInline(text: Inline): Seq[Inline] = text match {
case Summary(text) => List(text)
case Chain(items) => items flatMap summaryInInline
case Italic(text) => summaryInInline(text)
case Bold(text) => summaryInInline(text)
case Underline(text) => summaryInInline(text)
case Superscript(text) => summaryInInline(text)
case Subscript(text) => summaryInInline(text)
case Link(_, title) => summaryInInline(title)
case _ => Nil
}
(blocks flatMap { summaryInBlock(_) }).toList match {
case Nil => None
case inline :: Nil => Some(inline)
case inlines => Some(Chain(inlines))
}
}
}
/** A block-level element of text, such as a paragraph or code block. */
sealed abstract class Block
final case class Title(text: Inline, level: Int) extends Block
final case class Paragraph(text: Inline) extends Block
final case class Code(data: String) extends Block
final case class UnorderedList(items: Seq[Block]) extends Block
final case class OrderedList(items: Seq[Block], style: String) extends Block
final case class DefinitionList(items: SortedMap[Inline, Block]) extends Block
final case class HorizontalRule() extends Block
/** A section of text inside a block, possibly with formatting. */
sealed abstract class Inline
final case class Chain(items: Seq[Inline]) extends Inline
final case class Italic(text: Inline) extends Inline
final case class Bold(text: Inline) extends Inline
final case class Underline(text: Inline) extends Inline
final case class Superscript(text: Inline) extends Inline
final case class Subscript(text: Inline) extends Inline
final case class Link(target: String, title: Inline) extends Inline
final case class Monospace(text: Inline) extends Inline
final case class Text(text: String) extends Inline
abstract class EntityLink(val title: Inline) extends Inline { def link: LinkTo }
object EntityLink {
def apply(title: Inline, linkTo: LinkTo) = new EntityLink(title) { def link: LinkTo = linkTo }
def unapply(el: EntityLink): Option[(Inline, LinkTo)] = Some((el.title, el.link))
}
final case class HtmlTag(data: String) extends Inline {
private val Pattern = """(?ms)\A<(/?)(.*?)[\s>].*\z""".r
private val (isEnd, tagName) = data match {
case Pattern(s1, s2) =>
(! s1.isEmpty, Some(s2.toLowerCase))
case _ =>
(false, None)
}
def canClose(open: HtmlTag) = {
isEnd && tagName == open.tagName
}
private val TagsNotToClose = Set("br", "img")
def close = tagName collect { case name if !TagsNotToClose(name) => HtmlTag(s"</$name>") }
}
/** The summary of a comment, usually its first sentence. There must be exactly one summary per body. */
final case class Summary(text: Inline) extends Inline
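/** A small construction sketch (hypothetical comment content) showing how `Body.summary`
  * picks the `Summary` inline out of the first paragraph. */
object BodySummarySketch {
  val body = Body(Seq(
    Paragraph(Chain(Seq(
      Summary(Text("Parses a comment.")),
      Text(" The rest of the sentence is not part of the summary.")))),
    Code("parse(source)")))
  val summary: Option[Inline] = body.summary // == Some(Text("Parses a comment."))
}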
|
felixmulder/scala
|
src/scaladoc/scala/tools/nsc/doc/base/comment/Body.scala
|
Scala
|
bsd-3-clause
| 3,749 |
package com.googlecode.kanbanik.commands
import com.googlecode.kanbanik.builders.{PermissionsBuilder, UserBuilder}
import com.googlecode.kanbanik.dtos._
import com.googlecode.kanbanik.model.User
import com.googlecode.kanbanik.security._
class EditUserCommand extends BaseUserCommand with CredentialsUtils {
override def execute(params: ManipulateUserDto): Either[UserDto, ErrorDto] = {
val user = User.byId(params.userName)
val newPermissions = if (params.permissions.isDefined) {
val incorrectPermissions = findIncorrectPermissions(params.permissions.get)
if (incorrectPermissions.isDefined) {
return Right(incorrectPermissions.get)
}
params.permissions.get.map(PermissionsBuilder.buildEntity(_))
} else {
user.permissions
}
val (resPassword, resSalt): (String, String) = if (
params.newPassword != null && params.newPassword != "" &&
!user.unloggedFakeUser
) {
hashPassword(params.newPassword)
} else {
(user.password, user.salt)
}
val newUser = user.copy(
password = resPassword,
salt = resSalt,
realName = params.realName,
version = params.version,
pictureUrl = params.pictureUrl,
permissions = newPermissions
).store
new Left(UserBuilder.buildDto(newUser, params.sessionId.get))
}
override def baseCheck(param: ManipulateUserDto): (Check, String) = checkOneOf(PermissionType.EditUserData, param.userName)
override def composeFullCheck(param: ManipulateUserDto, baseCheck: (Check, String), allPermissionsIWantToSet: List[(Check, String)]): List[(Check, String)] =
checkOneOf(PermissionType.EditUserPermissions, param.userName) :: baseCheck :: allPermissionsIWantToSet
}
|
gudtago/kanbanik
|
kanbanik-server/src/main/scala/com/googlecode/kanbanik/commands/EditUserCommand.scala
|
Scala
|
apache-2.0
| 1,737 |
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.planner
import com.twitter.summingbird._
case class ProducerF[P <: Platform[P]](oldSources: List[Producer[P, Any]],
oldRef: Producer[P, Any],
f: List[Producer[P, Any]] => Producer[P, Any])
object StripNamedNode {
def castTail[P <: Platform[P]](node: Producer[P, Any]): TailProducer[P, Any] = node.asInstanceOf[TailProducer[P, Any]]
def castToPair[P <: Platform[P]](node: Producer[P, Any]): Producer[P, (Any, Any)] = node.asInstanceOf[Producer[P, (Any, Any)]]
def processLevel[P <: Platform[P]](optLast: Option[Producer[P, Any]],
l: TraversableOnce[ProducerF[P]],
m: Map[Producer[P, Any], Producer[P, Any]],
op: PartialFunction[Producer[P, Any], Option[Producer[P, Any]]]): (Option[Producer[P, Any]], Map[Producer[P, Any], Producer[P, Any]]) = {
l.foldLeft((optLast, m)) {
case ((nOptLast, nm), pp) =>
val ns = pp.oldSources.map(m(_))
val res = pp.f(ns)
val mutatedRes = if (op.isDefinedAt(res)) {
op(res) match {
case Some(p) => p
case None => ns(0)
}
} else {
res
}
(Some(mutatedRes), (nm + (pp.oldRef -> mutatedRes)))
}
}
def functionize[P <: Platform[P]](node: Producer[P, Any]): ProducerF[P] = {
node match {
// This case is special/different since AlsoTailProducer needs the full class maintained(unlike TailNamedProducer),
// but it is not a case class. It inherits from TailProducer so cannot be one.
case p: AlsoTailProducer[_, _, _] =>
ProducerF(
List(p.result.asInstanceOf[Producer[P, Any]], p.ensure.asInstanceOf[Producer[P, Any]]),
p,
{ (newEntries): List[Producer[P, Any]] => new AlsoTailProducer[P, Any, Any](castTail(newEntries(1)), castTail(newEntries(0))) }
)
case p @ AlsoProducer(_, _) => ProducerF(
List(p.result, p.ensure),
p,
{ (newEntries): List[Producer[P, Any]] => p.copy(ensure = castTail(newEntries(1)), result = newEntries(0)) }
)
case p @ NamedProducer(producer, _) => ProducerF(
List(producer),
p,
{ producerL: List[Producer[P, Any]] => p.copy(producer = producerL(0)) }
)
case p @ Source(_) => ProducerF(
List(),
p,
{ producerL: List[Producer[P, Any]] => p }
)
case p @ IdentityKeyedProducer(producer) => ProducerF(
List(producer),
p,
{ producerL: List[Producer[P, Any]] => p.copy(producer = castToPair(producerL(0))) }
)
case p @ OptionMappedProducer(producer, _) => ProducerF(
List(producer),
p,
{ producerL: List[Producer[P, Any]] => p.copy(producer = producerL(0)) }
)
case p @ FlatMappedProducer(producer, _) => ProducerF(
List(producer),
p,
{ producerL: List[Producer[P, Any]] => p.copy(producer = producerL(0)) }
)
case p @ ValueFlatMappedProducer(producer, _) => ProducerF(
List(producer),
p,
{ producerL: List[Producer[P, Any]] => p.copy(producer = castToPair(producerL(0))) }
)
case p @ KeyFlatMappedProducer(producer, _) => ProducerF(
List(producer),
p,
{ producerL: List[Producer[P, Any]] => p.copy(producer = castToPair(producerL(0))) }
)
case p @ MergedProducer(oL, oR) => ProducerF(
List(oL, oR),
p,
{ producerL: List[Producer[P, Any]] => p.copy(left = producerL(0), right = producerL(1)) }
)
case p @ LeftJoinedProducer(producer, _) => ProducerF(
List(producer),
p,
{ producerL: List[Producer[P, Any]] => p.copy(left = castToPair(producerL(0))) }
)
case p @ Summer(producer, _, _) => ProducerF(
List(producer),
p,
{ producerL: List[Producer[P, Any]] => p.copy(producer = castToPair(producerL(0))) }
)
case p @ WrittenProducer(producer, _) => ProducerF(
List(producer),
p,
{ producerL: List[Producer[P, Any]] => p.copy(producer = producerL(0)) }
)
}
}
def toFunctional[P <: Platform[P]](tail: Producer[P, Any]) =
graph
.dagDepth(Producer.entireGraphOf(tail))(Producer.parentsOf(_))
.toSeq
.groupBy(_._2)
.mapValues(_.map(_._1))
.mapValues(_.map(functionize(_)))
.toSeq
def mutateGraph[P <: Platform[P]](tail: Producer[P, Any], op: PartialFunction[Producer[P, Any], Option[Producer[P, Any]]]) = {
val newT: Option[Producer[P, Any]] = None
val x = toFunctional(tail).sortBy(_._1)
x.map(_._2).foldLeft((newT, Map[Producer[P, Any], Producer[P, Any]]())) {
case ((optLast, curMap), v) =>
processLevel(optLast, v, curMap, op)
}
}
def stripNamedNodes[P <: Platform[P]](node: Producer[P, Any]): (Map[Producer[P, Any], Producer[P, Any]], Producer[P, Any]) = {
def removeNamed: PartialFunction[Producer[P, Any], Option[Producer[P, Any]]] =
{ case p @ NamedProducer(p2, _) => None }
val (optTail, oldNewMap) = mutateGraph(node, removeNamed)
val newTail = optTail.get
(oldNewMap.map(x => (x._2, x._1)).toMap, optTail.get)
}
  // Priority list of names for a given producer
private def getName[P <: Platform[P]](dependants: Dependants[P], producer: Producer[P, Any]): List[String] = {
(producer :: dependants.transitiveDependantsOf(producer)).collect { case NamedProducer(_, n) => n }
}
def apply[P <: Platform[P], T](tail: TailProducer[P, T]): (Map[Producer[P, Any], List[String]], TailProducer[P, T]) = {
val dependants = Dependants(tail)
val (oldProducerToNewMap, newTail) = stripNamedNodes(tail)
(oldProducerToNewMap.mapValues(n => getName(dependants, n)), newTail.asInstanceOf[TailProducer[P, T]])
}
}
|
rangadi/summingbird
|
summingbird-online/src/main/scala/com/twitter/summingbird/planner/StripNamedNodes.scala
|
Scala
|
apache-2.0
| 6,339 |
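The StripNamedNodes planner above rewrites a producer graph by turning every node into a (children, node, rebuild) triple and then reassembling the graph from the leaves up. A minimal, self-contained sketch of that idea follows; it uses a toy node type instead of the real summingbird Producer API (Node, NodeF, GraphRewriteSketch and friends are illustrative names, not library code) and plain tree recursion instead of the depth-level fold used by the planner.
sealed trait Node
case class Leaf(name: String) extends Node
case class Named(inner: Node, label: String) extends Node
case class Merge(left: Node, right: Node) extends Node

// Analogue of ProducerF: the children of a node, the node itself, and a
// function that rebuilds the node from (possibly replaced) children.
case class NodeF(children: List[Node], original: Node, rebuild: List[Node] => Node)

object GraphRewriteSketch {

  def functionize(n: Node): NodeF = n match {
    case s @ Leaf(_)        => NodeF(Nil, s, _ => s)
    case p @ Named(in, lbl) => NodeF(List(in), p, cs => Named(cs(0), lbl))
    case p @ Merge(l, r)    => NodeF(List(l, r), p, cs => Merge(cs(0), cs(1)))
  }

  // Rebuild bottom-up, applying the rewrite wherever `op` is defined.
  def mutate(n: Node)(op: PartialFunction[Node, Node]): Node = {
    val f = functionize(n)
    val rebuilt = f.rebuild(f.children.map(c => mutate(c)(op)))
    op.applyOrElse(rebuilt, (x: Node) => x)
  }

  // Drop Named wrappers, much like stripNamedNodes drops NamedProducer.
  val stripNamed: PartialFunction[Node, Node] = { case Named(inner, _) => inner }

  def main(args: Array[String]): Unit = {
    val example = Named(Merge(Leaf("a"), Named(Leaf("b"), "tag")), "job")
    assert(mutate(example)(stripNamed) == Merge(Leaf("a"), Leaf("b")))
  }
}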
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.action
import io.gatling.core.action.builder.ActionBuilder
import io.gatling.core.protocol.ProtocolComponentsRegistry
import io.gatling.http.protocol.{ HttpProtocol, HttpComponents }
abstract class HttpActionBuilder extends ActionBuilder {
def lookUpHttpComponents(protocolComponentsRegistry: ProtocolComponentsRegistry): HttpComponents =
protocolComponentsRegistry.components(HttpProtocol.HttpProtocolKey)
}
|
wiacekm/gatling
|
gatling-http/src/main/scala/io/gatling/http/action/HttpActionBuilder.scala
|
Scala
|
apache-2.0
| 1,060 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.io.{InputStream, OutputStream}
import java.rmi.server.UID
import scala.collection.JavaConverters._
import scala.language.implicitConversions
import scala.reflect.ClassTag
import com.google.common.base.Objects
import org.apache.avro.Schema
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hive.ql.exec.{UDF, Utilities}
import org.apache.hadoop.hive.ql.plan.{FileSinkDesc, TableDesc}
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFMacro
import org.apache.hadoop.hive.serde2.ColumnProjectionUtils
import org.apache.hadoop.hive.serde2.avro.{AvroGenericRecordWritable, AvroSerdeUtils}
import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector
import org.apache.hadoop.io.Writable
import org.apache.hive.com.esotericsoftware.kryo.Kryo
import org.apache.hive.com.esotericsoftware.kryo.io.{Input, Output}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.types.Decimal
import org.apache.spark.util.Utils
private[hive] object HiveShim {
// Precision and scale to pass for unlimited decimals; these are the same as the precision and
// scale Hive 0.13 infers for BigDecimals from sources that don't specify them (e.g. UDFs)
val UNLIMITED_DECIMAL_PRECISION = 38
val UNLIMITED_DECIMAL_SCALE = 18
val HIVE_GENERIC_UDF_MACRO_CLS = "org.apache.hadoop.hive.ql.udf.generic.GenericUDFMacro"
/*
   * This function became private in hive-0.13, but we have to do this to work around a Hive bug.
*/
private def appendReadColumnNames(conf: Configuration, cols: Seq[String]) {
val old: String = conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, "")
val result: StringBuilder = new StringBuilder(old)
var first: Boolean = old.isEmpty
for (col <- cols) {
if (first) {
first = false
} else {
result.append(',')
}
result.append(col)
}
conf.set(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, result.toString)
}
/*
   * Cannot use ColumnProjectionUtils.appendReadColumns directly when ids is null or empty.
*/
def appendReadColumns(conf: Configuration, ids: Seq[Integer], names: Seq[String]) {
if (ids != null && ids.nonEmpty) {
ColumnProjectionUtils.appendReadColumns(conf, ids.asJava)
}
if (names != null && names.nonEmpty) {
appendReadColumnNames(conf, names)
}
}
/*
* Bug introduced in hive-0.13. AvroGenericRecordWritable has a member recordReaderID that
   * needs to be initialized before serialization.
*/
def prepareWritable(w: Writable, serDeProps: Seq[(String, String)]): Writable = {
w match {
case w: AvroGenericRecordWritable =>
w.setRecordReaderID(new UID())
        // In Hive 1.1, the record's schema may need to be initialized manually or an NPE will
// be thrown.
if (w.getFileSchema() == null) {
serDeProps
.find(_._1 == AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName())
.foreach { kv =>
w.setFileSchema(new Schema.Parser().parse(kv._2))
}
}
case _ =>
}
w
}
def toCatalystDecimal(hdoi: HiveDecimalObjectInspector, data: Any): Decimal = {
if (hdoi.preferWritable()) {
Decimal(hdoi.getPrimitiveWritableObject(data).getHiveDecimal().bigDecimalValue,
hdoi.precision(), hdoi.scale())
} else {
Decimal(hdoi.getPrimitiveJavaObject(data).bigDecimalValue(), hdoi.precision(), hdoi.scale())
}
}
/**
   * This class provides UDF creation and also UDF instance serialization and
   * de-serialization across process boundaries.
*
* Detail discussion can be found at https://github.com/apache/spark/pull/3640
*
* @param functionClassName UDF class name
* @param instance optional UDF instance which contains additional information (for macro)
*/
private[hive] case class HiveFunctionWrapper(var functionClassName: String,
private var instance: AnyRef = null) extends java.io.Externalizable {
// for Serialization
def this() = this(null)
override def hashCode(): Int = {
if (functionClassName == HIVE_GENERIC_UDF_MACRO_CLS) {
Objects.hashCode(functionClassName, instance.asInstanceOf[GenericUDFMacro].getBody())
} else {
functionClassName.hashCode()
}
}
override def equals(other: Any): Boolean = other match {
case a: HiveFunctionWrapper if functionClassName == a.functionClassName =>
// In case of udf macro, check to make sure they point to the same underlying UDF
if (functionClassName == HIVE_GENERIC_UDF_MACRO_CLS) {
a.instance.asInstanceOf[GenericUDFMacro].getBody() ==
instance.asInstanceOf[GenericUDFMacro].getBody()
} else {
true
}
case _ => false
}
@transient
def deserializeObjectByKryo[T: ClassTag](
kryo: Kryo,
in: InputStream,
clazz: Class[_]): T = {
val inp = new Input(in)
val t: T = kryo.readObject(inp, clazz).asInstanceOf[T]
inp.close()
t
}
@transient
def serializeObjectByKryo(
kryo: Kryo,
plan: Object,
out: OutputStream) {
val output: Output = new Output(out)
kryo.writeObject(output, plan)
output.close()
}
def deserializePlan[UDFType](is: java.io.InputStream, clazz: Class[_]): UDFType = {
deserializeObjectByKryo(Utilities.runtimeSerializationKryo.get(), is, clazz)
.asInstanceOf[UDFType]
}
def serializePlan(function: AnyRef, out: java.io.OutputStream): Unit = {
serializeObjectByKryo(Utilities.runtimeSerializationKryo.get(), function, out)
}
def writeExternal(out: java.io.ObjectOutput) {
// output the function name
out.writeUTF(functionClassName)
// Write a flag if instance is null or not
out.writeBoolean(instance != null)
if (instance != null) {
        // Some of the UDFs are serializable, but others are not;
        // Hive Utilities can handle both cases.
val baos = new java.io.ByteArrayOutputStream()
serializePlan(instance, baos)
val functionInBytes = baos.toByteArray
// output the function bytes
out.writeInt(functionInBytes.length)
out.write(functionInBytes, 0, functionInBytes.length)
}
}
def readExternal(in: java.io.ObjectInput) {
// read the function name
functionClassName = in.readUTF()
if (in.readBoolean()) {
// if the instance is not null
// read the function in bytes
val functionInBytesLength = in.readInt()
val functionInBytes = new Array[Byte](functionInBytesLength)
in.readFully(functionInBytes)
// deserialize the function object via Hive Utilities
instance = deserializePlan[AnyRef](new java.io.ByteArrayInputStream(functionInBytes),
Utils.getContextOrSparkClassLoader.loadClass(functionClassName))
}
}
def createFunction[UDFType <: AnyRef](): UDFType = {
if (instance != null) {
instance.asInstanceOf[UDFType]
} else {
val func = Utils.getContextOrSparkClassLoader
.loadClass(functionClassName).newInstance.asInstanceOf[UDFType]
if (!func.isInstanceOf[UDF]) {
          // We cache the function if it's not a simple UDF,
          // as we always have to create a new instance for a simple UDF.
instance = func
}
func
}
}
}
/*
* Bug introduced in hive-0.13. FileSinkDesc is serializable, but its member path is not.
* Fix it through wrapper.
*/
implicit def wrapperToFileSinkDesc(w: ShimFileSinkDesc): FileSinkDesc = {
val f = new FileSinkDesc(new Path(w.dir), w.tableInfo, w.compressed)
f.setCompressCodec(w.compressCodec)
f.setCompressType(w.compressType)
f.setTableInfo(w.tableInfo)
f.setDestTableId(w.destTableId)
f
}
/*
* Bug introduced in hive-0.13. FileSinkDesc is serializable, but its member path is not.
* Fix it through wrapper.
*/
private[hive] class ShimFileSinkDesc(
var dir: String,
var tableInfo: TableDesc,
var compressed: Boolean)
extends Serializable with Logging {
var compressCodec: String = _
var compressType: String = _
var destTableId: Int = _
def setCompressed(compressed: Boolean) {
this.compressed = compressed
}
def getDirName(): String = dir
def setDestTableId(destTableId: Int) {
this.destTableId = destTableId
}
def setTableInfo(tableInfo: TableDesc) {
this.tableInfo = tableInfo
}
def setCompressCodec(intermediateCompressorCodec: String) {
compressCodec = intermediateCompressorCodec
}
def setCompressType(intermediateCompressType: String) {
compressType = intermediateCompressType
}
}
}
|
gioenn/xSpark
|
sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveShim.scala
|
Scala
|
apache-2.0
| 9,710 |
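HiveFunctionWrapper above relies on java.io.Externalizable: ordinary Java serialization delegates to writeExternal, which emits the class name, a presence flag and the serialized UDF bytes, and on the read side instantiates the wrapper through its no-arg constructor before calling readExternal. A minimal, self-contained sketch of that flag-then-payload round trip follows, using a standalone class rather than the Spark/Hive types (Wrapper and ExternalizableRoundTrip are illustrative names).
import java.io._

// A small Externalizable class mirroring the flag-then-payload layout used by
// HiveFunctionWrapper.writeExternal/readExternal. Illustrative only.
class Wrapper(var className: String, var payload: Array[Byte]) extends Externalizable {
  // Java serialization instantiates Externalizable classes through the
  // public no-arg constructor before calling readExternal.
  def this() = this(null, null)

  override def writeExternal(out: ObjectOutput): Unit = {
    out.writeUTF(className)
    out.writeBoolean(payload != null)
    if (payload != null) {
      out.writeInt(payload.length)
      out.write(payload, 0, payload.length)
    }
  }

  override def readExternal(in: ObjectInput): Unit = {
    className = in.readUTF()
    if (in.readBoolean()) {
      val bytes = new Array[Byte](in.readInt())
      in.readFully(bytes)
      payload = bytes
    }
  }
}

object ExternalizableRoundTrip {
  def main(args: Array[String]): Unit = {
    // Round trip through standard Java serialization.
    val baos = new ByteArrayOutputStream()
    val oos = new ObjectOutputStream(baos)
    oos.writeObject(new Wrapper("com.example.SomeUdf", Array[Byte](1, 2, 3)))
    oos.close()

    val ois = new ObjectInputStream(new ByteArrayInputStream(baos.toByteArray))
    val copy = ois.readObject().asInstanceOf[Wrapper]
    assert(copy.className == "com.example.SomeUdf")
    assert(copy.payload.sameElements(Array[Byte](1, 2, 3)))
  }
}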