| code | repo_name | path | language | license | size |
| stringlengths 5–1M | stringlengths 5–109 | stringlengths 6–208 | stringclasses 1 value | stringclasses 15 values | int64 5–1M |
---|---|---|---|---|---|
package collins.controllers.actions.assettype
import scala.concurrent.Future
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.libs.json.JsNumber
import play.api.libs.json.JsObject
import collins.controllers.Api
import collins.controllers.ResponseData
import collins.controllers.SecureController
import collins.controllers.actions.RequestDataHolder
import collins.controllers.actions.SecureAction
import collins.models.AssetType
import collins.util.MessageHelper
import collins.util.security.SecuritySpecification
import collins.validation.StringUtil
object DeleteAction {
object Messages extends MessageHelper("controllers.AssetTypeApi.deleteAssetType") {
def invalidName = messageWithDefault("invalidName", "The specified name is invalid")
def noSuchName = messageWithDefault("noSuchName", "The specified name does not exist")
def systemName = messageWithDefault("systemName",
"The specified name is reserved and can not be deleted")
}
}
/**
* @include DeleteAction.desc
*
* Delete an asset type
*
* @apigroup AssetType
* @apimethod DELETE
* @apiurl /api/assettype/:name
* @apiparam name String The name of the asset type to delete
* @apirespond 202 success - delete accepted
* @apirespond 400 invalid input
* @apirespond 404 invalid asset type name
* @apirespond 409 system name cannot be deleted
* @apirespond 500 error deleting asset type
* @apiperm controllers.AssetTypeApi.deleteAssetType
* @collinsshell {{{
* collins-shell asset_type delete NAME
* }}}
* @curlexample {{{
* curl -v -u blake:admin:first --basic \
* -X DELETE \
* http://localhost:9000/api/assettype/SERVICE
* }}}
*/
case class DeleteAction(
name: String,
spec: SecuritySpecification,
handler: SecureController
) extends SecureAction(spec, handler) {
import DeleteAction.Messages._
case class ActionDataHolder(atype: AssetType) extends RequestDataHolder
override def validate(): Validation = {
StringUtil.trim(name).filter(s => s.size > 1 && s.size <= 32).map(_.toUpperCase) match {
case None => Left(RequestDataHolder.error400(invalidName))
case Some(vname) => AssetType.findByName(vname) match {
case None =>
Left(RequestDataHolder.error404(noSuchName))
case Some(atype) => AssetType.isSystemType(atype) match {
case true =>
Left(RequestDataHolder.error409(systemName))
case false =>
Right(ActionDataHolder(atype))
}
}
}
}
override def execute(rdh: RequestDataHolder) = Future {
rdh match {
case ActionDataHolder(atype) => try {
AssetType.delete(atype)
ResponseData(Status.Accepted, JsObject(Seq("DELETED" -> JsNumber(1))))
} catch {
case e: Throwable =>
Api.errorResponse(
"Failed to delete asset type %s".format(atype.name),
Status.InternalServerError,
Some(e)
)
}
}
}
}
| byxorna/collins | app/collins/controllers/actions/assettype/DeleteAction.scala | Scala | apache-2.0 | 2,972 |
/*
* Copyright (c) 2017 Georgios Andreadakis
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.tap.domain.docimport
import org.tap.domain.DocumentRepository
/**
* Defines the collaborators, a repository and parser, for the document import.
*
* @author Georgios Andreadakis ([email protected])
*/
trait DocumentImportContext {
def repository: DocumentRepository
def parser: DocumentParser
}
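A minimal wiring sketch (hypothetical, not part of this repository; the class name is illustrative): any class that supplies the two collaborators satisfies the context, e.g. in the same package:
// Hypothetical: concrete context that simply holds the two collaborators an import run needs.
class SimpleImportContext(
    val repository: DocumentRepository,
    val parser: DocumentParser) extends DocumentImportContext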
| GeorgiosAndreadakis/TextAnalyserPlatform | domain/src/main/scala/org/tap/domain/docimport/DocumentImportContext.scala | Scala | apache-2.0 | 944 |
package pureconfig.module
import scala.reflect.ClassTag
import _root_.enumeratum._
import _root_.enumeratum.values._
import pureconfig.ConfigConvert
import pureconfig.ConfigConvert.{ viaNonEmptyString, viaNonEmptyStringOpt, viaStringOpt }
import pureconfig.error.CannotConvert
package object enumeratum {
implicit def enumeratumConfigConvert[A <: EnumEntry](implicit enum: Enum[A], ct: ClassTag[A]): ConfigConvert[A] =
viaNonEmptyStringOpt[A](enum.withNameOption, _.entryName)
implicit def enumeratumIntConfigConvert[A <: IntEnumEntry](implicit enum: IntEnum[A], ct: ClassTag[A]): ConfigConvert[A] =
viaNonEmptyStringOpt[A](v => enum.withValueOpt(v.toInt), _.value.toString)
implicit def enumeratumLongConfigConvert[A <: LongEnumEntry](implicit enum: LongEnum[A], ct: ClassTag[A]): ConfigConvert[A] =
viaNonEmptyStringOpt[A](v => enum.withValueOpt(v.toLong), _.value.toString)
implicit def enumeratumShortConfigConvert[A <: ShortEnumEntry](implicit enum: ShortEnum[A], ct: ClassTag[A]): ConfigConvert[A] =
viaNonEmptyStringOpt[A](v => enum.withValueOpt(v.toShort), _.value.toString)
implicit def enumeratumStringConfigConvert[A <: StringEnumEntry](implicit enum: StringEnum[A], ct: ClassTag[A]): ConfigConvert[A] =
viaStringOpt[A](v => enum.withValueOpt(v), _.value.toString)
implicit def enumeratumByteConfigConvert[A <: ByteEnumEntry](implicit enum: ByteEnum[A], ct: ClassTag[A]): ConfigConvert[A] =
viaNonEmptyStringOpt[A](v => enum.withValueOpt(v.toByte), _.value.toString)
implicit def enumeratumCharConfigConvert[A <: CharEnumEntry](implicit enum: CharEnum[A], ct: ClassTag[A]): ConfigConvert[A] =
viaNonEmptyString[A](
s => location => ensureOneChar(s) match {
case Right(v) => Right(enum.withValue(v))
case Left(msg) => Left(CannotConvert(s, ct.runtimeClass.getSimpleName, msg, location, None))
},
_.value.toString)
private val ensureOneChar: Seq[Char] => Either[String, Char] = {
case Seq(c) => Right(c)
case s => Left(s"""Cannot read a character value from "$s"""")
}
}
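A usage sketch, not part of the module: the Greeting enum and the EnumeratumUsageSketch object below are illustrative, and the ConfigConvert.from(ConfigValue) call assumes the pureconfig API this file targets. With an enumeratum Enum companion in scope, enumeratumConfigConvert reads entries by name.
import com.typesafe.config.ConfigValueFactory
import _root_.enumeratum._
import pureconfig.ConfigConvert
import pureconfig.module.enumeratum._

object EnumeratumUsageSketch {
  sealed trait Greeting extends EnumEntry
  object Greeting extends Enum[Greeting] {
    val values = findValues
    case object Hello extends Greeting
    case object GoodBye extends Greeting
  }

  // Summons the implicit enumeratumConfigConvert defined above and parses an entry
  // from its name; an unknown name yields a Left with a CannotConvert failure.
  val convert = implicitly[ConfigConvert[Greeting]]
  val parsed = convert.from(ConfigValueFactory.fromAnyRef("Hello")) // Right(Greeting.Hello)
}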
| derekmorr/pureconfig | modules/enumeratum/src/main/scala/pureconfig/module/enumeratum.scala | Scala | mpl-2.0 | 2,079 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.structure
import io.gatling.core.action.Action
/**
* This trait defines most of the scenario related DSL
*/
trait StructureBuilder[B <: StructureBuilder[B]]
extends Execs[B]
with Pauses[B]
with Feeds[B]
with Loops[B]
with ConditionalStatements[B]
with Errors[B]
with Groups[B] {
private[gatling] def build(ctx: ScenarioContext, chainNext: Action): Action =
actionBuilders.foldLeft(chainNext) { (next, actionBuilder) =>
actionBuilder.build(ctx, next)
}
}
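A toy sketch of the chaining pattern in build (the Mini* names are illustrative, not Gatling's API): folding the builders over the terminal action threads each partially built chain in as the next builder's `next`.
object ChainSketch {
  trait MiniAction
  trait MiniActionBuilder { def build(next: MiniAction): MiniAction }

  case class Step(name: String, next: MiniAction) extends MiniAction
  case object Terminal extends MiniAction

  // Mirrors the fold in StructureBuilder.build: each builder wraps the chain built so far.
  def buildChain(builders: List[MiniActionBuilder], chainNext: MiniAction): MiniAction =
    builders.foldLeft(chainNext)((next, builder) => builder.build(next))
}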
| wiacekm/gatling | gatling-core/src/main/scala/io/gatling/core/structure/StructureBuilder.scala | Scala | apache-2.0 | 1,131 |
/*
* Copyright 2001-2015 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalactic.NameUtil
private[scalatest] case class DeferredAbortedSuite(suiteClassName: String, t: Throwable) extends Suite {
override def run(testName: Option[String], args: Args): Status = {
throw t
}
override def suiteName: String = NameUtil.stripDollars(NameUtil.parseSimpleName(suiteClassName))
}
| dotty-staging/scalatest | scalatest/src/main/scala/org/scalatest/DeferredAbortedSuite.scala | Scala | apache-2.0 | 948 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.yarn
import java.io.{File, FileOutputStream, IOException, OutputStreamWriter}
import java.net.{InetAddress, UnknownHostException, URI}
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.{Locale, Properties, UUID}
import java.util.zip.{ZipEntry, ZipOutputStream}
import scala.collection.JavaConverters._
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, ListBuffer, Map}
import scala.util.control.NonFatal
import com.google.common.base.Objects
import com.google.common.io.Files
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.hadoop.fs.permission.FsPermission
import org.apache.hadoop.io.DataOutputBuffer
import org.apache.hadoop.mapreduce.MRJobConfig
import org.apache.hadoop.security.{Credentials, UserGroupInformation}
import org.apache.hadoop.util.StringUtils
import org.apache.hadoop.yarn.api._
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment
import org.apache.hadoop.yarn.api.protocolrecords._
import org.apache.hadoop.yarn.api.records._
import org.apache.hadoop.yarn.client.api.{YarnClient, YarnClientApplication}
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException
import org.apache.hadoop.yarn.util.Records
import org.apache.spark.{SecurityManager, SparkConf, SparkException}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.deploy.yarn.config._
import org.apache.spark.deploy.yarn.security.ConfigurableCredentialManager
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.launcher.{LauncherBackend, SparkAppHandle, YarnCommandBuilderUtils}
import org.apache.spark.util.{CallerContext, Utils}
private[spark] class Client(
val args: ClientArguments,
val hadoopConf: Configuration,
val sparkConf: SparkConf)
extends Logging {
import Client._
import YarnSparkHadoopUtil._
def this(clientArgs: ClientArguments, spConf: SparkConf) =
this(clientArgs, SparkHadoopUtil.get.newConfiguration(spConf), spConf)
private val yarnClient = YarnClient.createYarnClient
private val yarnConf = new YarnConfiguration(hadoopConf)
private val isClusterMode = sparkConf.get("spark.submit.deployMode", "client") == "cluster"
// AM related configurations
private val amMemory = if (isClusterMode) {
sparkConf.get(DRIVER_MEMORY).toInt
} else {
sparkConf.get(AM_MEMORY).toInt
}
private val amMemoryOverhead = {
val amMemoryOverheadEntry = if (isClusterMode) DRIVER_MEMORY_OVERHEAD else AM_MEMORY_OVERHEAD
sparkConf.get(amMemoryOverheadEntry).getOrElse(
math.max((MEMORY_OVERHEAD_FACTOR * amMemory).toLong, MEMORY_OVERHEAD_MIN)).toInt
}
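// For example (assuming the usual defaults MEMORY_OVERHEAD_FACTOR = 0.10 and
// MEMORY_OVERHEAD_MIN = 384 MB): a 4096 MB AM gets max(409, 384) = 409 MB of
// overhead unless the overhead entry is set explicitly.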
private val amCores = if (isClusterMode) {
sparkConf.get(DRIVER_CORES)
} else {
sparkConf.get(AM_CORES)
}
// Executor related configurations
private val executorMemory = sparkConf.get(EXECUTOR_MEMORY)
private val executorMemoryOverhead = sparkConf.get(EXECUTOR_MEMORY_OVERHEAD).getOrElse(
math.max((MEMORY_OVERHEAD_FACTOR * executorMemory).toLong, MEMORY_OVERHEAD_MIN)).toInt
private val distCacheMgr = new ClientDistributedCacheManager()
private var loginFromKeytab = false
private var principal: String = null
private var keytab: String = null
private var credentials: Credentials = null
private var amKeytabFileName: String = null
private val launcherBackend = new LauncherBackend() {
override def onStopRequest(): Unit = {
if (isClusterMode && appId != null) {
yarnClient.killApplication(appId)
} else {
setState(SparkAppHandle.State.KILLED)
stop()
}
}
}
private val fireAndForget = isClusterMode && !sparkConf.get(WAIT_FOR_APP_COMPLETION)
private var appId: ApplicationId = null
// The app staging dir based on the STAGING_DIR configuration if configured
// otherwise based on the users home directory.
private val appStagingBaseDir = sparkConf.get(STAGING_DIR).map { new Path(_) }
.getOrElse(FileSystem.get(hadoopConf).getHomeDirectory())
private val credentialManager = new ConfigurableCredentialManager(sparkConf, hadoopConf)
def reportLauncherState(state: SparkAppHandle.State): Unit = {
launcherBackend.setState(state)
}
def stop(): Unit = {
launcherBackend.close()
yarnClient.stop()
// Unset YARN mode system env variable, to allow switching between cluster types.
System.clearProperty("SPARK_YARN_MODE")
}
/**
* Submit an application running our ApplicationMaster to the ResourceManager.
*
* The stable Yarn API provides a convenience method (YarnClient#createApplication) for
* creating applications and setting up the application submission context. This was not
* available in the alpha API.
*/
def submitApplication(): ApplicationId = {
var appId: ApplicationId = null
try {
launcherBackend.connect()
// Setup the credentials before doing anything else,
// so we don't have issues at any point.
setupCredentials()
yarnClient.init(yarnConf)
yarnClient.start()
logInfo("Requesting a new application from cluster with %d NodeManagers"
.format(yarnClient.getYarnClusterMetrics.getNumNodeManagers))
// Get a new application from our RM
val newApp = yarnClient.createApplication()
val newAppResponse = newApp.getNewApplicationResponse()
appId = newAppResponse.getApplicationId()
new CallerContext("CLIENT", sparkConf.get(APP_CALLER_CONTEXT),
Option(appId.toString)).setCurrentContext()
// Verify whether the cluster has enough resources for our AM
verifyClusterResources(newAppResponse)
// Set up the appropriate contexts to launch our AM
val containerContext = createContainerLaunchContext(newAppResponse)
val appContext = createApplicationSubmissionContext(newApp, containerContext)
// Finally, submit and monitor the application
logInfo(s"Submitting application $appId to ResourceManager")
yarnClient.submitApplication(appContext)
launcherBackend.setAppId(appId.toString)
reportLauncherState(SparkAppHandle.State.SUBMITTED)
appId
} catch {
case e: Throwable =>
if (appId != null) {
cleanupStagingDir(appId)
}
throw e
}
}
/**
* Cleanup application staging directory.
*/
private def cleanupStagingDir(appId: ApplicationId): Unit = {
val stagingDirPath = new Path(appStagingBaseDir, getAppStagingDir(appId))
try {
val preserveFiles = sparkConf.get(PRESERVE_STAGING_FILES)
val fs = stagingDirPath.getFileSystem(hadoopConf)
if (!preserveFiles && fs.delete(stagingDirPath, true)) {
logInfo(s"Deleted staging directory $stagingDirPath")
}
} catch {
case ioe: IOException =>
logWarning("Failed to cleanup staging dir " + stagingDirPath, ioe)
}
}
/**
* Set up the context for submitting our ApplicationMaster.
* This uses the YarnClientApplication not available in the Yarn alpha API.
*/
def createApplicationSubmissionContext(
newApp: YarnClientApplication,
containerContext: ContainerLaunchContext): ApplicationSubmissionContext = {
val appContext = newApp.getApplicationSubmissionContext
appContext.setApplicationName(sparkConf.get("spark.app.name", "Spark"))
appContext.setQueue(sparkConf.get(QUEUE_NAME))
appContext.setAMContainerSpec(containerContext)
appContext.setApplicationType("SPARK")
sparkConf.get(APPLICATION_TAGS).foreach { tags =>
appContext.setApplicationTags(new java.util.HashSet[String](tags.asJava))
}
sparkConf.get(MAX_APP_ATTEMPTS) match {
case Some(v) => appContext.setMaxAppAttempts(v)
case None => logDebug(s"${MAX_APP_ATTEMPTS.key} is not set. " +
"Cluster's default value will be used.")
}
sparkConf.get(AM_ATTEMPT_FAILURE_VALIDITY_INTERVAL_MS).foreach { interval =>
appContext.setAttemptFailuresValidityInterval(interval)
}
val capability = Records.newRecord(classOf[Resource])
capability.setMemory(amMemory + amMemoryOverhead)
capability.setVirtualCores(amCores)
sparkConf.get(AM_NODE_LABEL_EXPRESSION) match {
case Some(expr) =>
val amRequest = Records.newRecord(classOf[ResourceRequest])
amRequest.setResourceName(ResourceRequest.ANY)
amRequest.setPriority(Priority.newInstance(0))
amRequest.setCapability(capability)
amRequest.setNumContainers(1)
amRequest.setNodeLabelExpression(expr)
appContext.setAMContainerResourceRequest(amRequest)
case None =>
appContext.setResource(capability)
}
sparkConf.get(ROLLED_LOG_INCLUDE_PATTERN).foreach { includePattern =>
try {
val logAggregationContext = Records.newRecord(classOf[LogAggregationContext])
// These two methods were added in Hadoop 2.6.4, so we still need to use reflection to
// avoid compile error when building against Hadoop 2.6.0 ~ 2.6.3.
val setRolledLogsIncludePatternMethod =
logAggregationContext.getClass.getMethod("setRolledLogsIncludePattern", classOf[String])
setRolledLogsIncludePatternMethod.invoke(logAggregationContext, includePattern)
sparkConf.get(ROLLED_LOG_EXCLUDE_PATTERN).foreach { excludePattern =>
val setRolledLogsExcludePatternMethod =
logAggregationContext.getClass.getMethod("setRolledLogsExcludePattern", classOf[String])
setRolledLogsExcludePatternMethod.invoke(logAggregationContext, excludePattern)
}
appContext.setLogAggregationContext(logAggregationContext)
} catch {
case NonFatal(e) =>
logWarning(s"Ignoring ${ROLLED_LOG_INCLUDE_PATTERN.key} because the version of YARN " +
"does not support it", e)
}
}
appContext
}
/** Set up security tokens for launching our ApplicationMaster container. */
private def setupSecurityToken(amContainer: ContainerLaunchContext): Unit = {
val dob = new DataOutputBuffer
credentials.writeTokenStorageToStream(dob)
amContainer.setTokens(ByteBuffer.wrap(dob.getData))
}
/** Get the application report from the ResourceManager for an application we have submitted. */
def getApplicationReport(appId: ApplicationId): ApplicationReport =
yarnClient.getApplicationReport(appId)
/**
* Return the security token used by this client to communicate with the ApplicationMaster.
* If no security is enabled, the token returned by the report is null.
*/
private def getClientToken(report: ApplicationReport): String =
Option(report.getClientToAMToken).map(_.toString).getOrElse("")
/**
* Fail fast if we have requested more resources per container than is available in the cluster.
*/
private def verifyClusterResources(newAppResponse: GetNewApplicationResponse): Unit = {
val maxMem = newAppResponse.getMaximumResourceCapability().getMemory()
logInfo("Verifying our application has not requested more than the maximum " +
s"memory capability of the cluster ($maxMem MB per container)")
val executorMem = executorMemory + executorMemoryOverhead
if (executorMem > maxMem) {
throw new IllegalArgumentException(s"Required executor memory ($executorMemory" +
s"+$executorMemoryOverhead MB) is above the max threshold ($maxMem MB) of this cluster! " +
"Please check the values of 'yarn.scheduler.maximum-allocation-mb' and/or " +
"'yarn.nodemanager.resource.memory-mb'.")
}
val amMem = amMemory + amMemoryOverhead
if (amMem > maxMem) {
throw new IllegalArgumentException(s"Required AM memory ($amMemory" +
s"+$amMemoryOverhead MB) is above the max threshold ($maxMem MB) of this cluster! " +
"Please increase the value of 'yarn.scheduler.maximum-allocation-mb'.")
}
logInfo("Will allocate AM container, with %d MB memory including %d MB overhead".format(
amMem,
amMemoryOverhead))
// We could add checks to make sure the entire cluster has enough resources but that involves
// getting all the node reports and computing ourselves.
}
/**
* Copy the given file to a remote file system (e.g. HDFS) if needed.
* The file is only copied if the source and destination file systems are different. This is used
* for preparing resources for launching the ApplicationMaster container. Exposed for testing.
*/
private[yarn] def copyFileToRemote(
destDir: Path,
srcPath: Path,
replication: Short,
symlinkCache: Map[URI, Path],
force: Boolean = false,
destName: Option[String] = None): Path = {
val destFs = destDir.getFileSystem(hadoopConf)
val srcFs = srcPath.getFileSystem(hadoopConf)
var destPath = srcPath
if (force || !compareFs(srcFs, destFs)) {
destPath = new Path(destDir, destName.getOrElse(srcPath.getName()))
logInfo(s"Uploading resource $srcPath -> $destPath")
FileUtil.copy(srcFs, srcPath, destFs, destPath, false, hadoopConf)
destFs.setReplication(destPath, replication)
destFs.setPermission(destPath, new FsPermission(APP_FILE_PERMISSION))
} else {
logInfo(s"Source and destination file systems are the same. Not copying $srcPath")
}
// Resolve any symlinks in the URI path so using a "current" symlink to point to a specific
// version shows the specific version in the distributed cache configuration
val qualifiedDestPath = destFs.makeQualified(destPath)
val qualifiedDestDir = qualifiedDestPath.getParent
val resolvedDestDir = symlinkCache.getOrElseUpdate(qualifiedDestDir.toUri(), {
val fc = FileContext.getFileContext(qualifiedDestDir.toUri(), hadoopConf)
fc.resolvePath(qualifiedDestDir)
})
new Path(resolvedDestDir, qualifiedDestPath.getName())
}
/**
* Upload any resources to the distributed cache if needed. If a resource is intended to be
* consumed locally, set up the appropriate config for downstream code to handle it properly.
* This is used for setting up a container launch context for our ApplicationMaster.
* Exposed for testing.
*/
def prepareLocalResources(
destDir: Path,
pySparkArchives: Seq[String]): HashMap[String, LocalResource] = {
logInfo("Preparing resources for our AM container")
// Upload Spark and the application JAR to the remote file system if necessary,
// and add them as local resources to the application master.
val fs = destDir.getFileSystem(hadoopConf)
// Merge credentials obtained from registered providers
val nearestTimeOfNextRenewal = credentialManager.obtainCredentials(hadoopConf, credentials)
if (credentials != null) {
// Add credentials to current user's UGI, so that following operations don't need to use the
// Kerberos tgt to get delegations again in the client side.
UserGroupInformation.getCurrentUser.addCredentials(credentials)
logDebug(YarnSparkHadoopUtil.get.dumpTokens(credentials).mkString("\n"))
}
// If we log in with a principal and keytab, and the credentials can be renewed at some
// time after the current time, pass the next renewal and update times to the credential
// renewer and updater.
if (loginFromKeytab && nearestTimeOfNextRenewal > System.currentTimeMillis() &&
nearestTimeOfNextRenewal != Long.MaxValue) {
// The valid renewal time is 75% of the time until the next renewal, and the valid update
// time is slightly later (80% of the time until the next renewal). This makes sure
// credentials are renewed and updated before they expire.
val currTime = System.currentTimeMillis()
val renewalTime = (nearestTimeOfNextRenewal - currTime) * 0.75 + currTime
val updateTime = (nearestTimeOfNextRenewal - currTime) * 0.8 + currTime
sparkConf.set(CREDENTIALS_RENEWAL_TIME, renewalTime.toLong)
sparkConf.set(CREDENTIALS_UPDATE_TIME, updateTime.toLong)
}
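// For example, if the nearest renewal is 10 hours away, the renewal time lands at
// roughly 7.5 hours from now and the update time at roughly 8 hours from now.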
// Used to keep track of URIs added to the distributed cache. If the same URI is added
// multiple times, YARN will fail to launch containers for the app with an internal
// error.
val distributedUris = new HashSet[String]
// Used to keep track of the names of files added to the distributed cache. If files with
// the same name but different paths are added multiple times, YARN will fail to launch
// containers for the app with an internal error.
val distributedNames = new HashSet[String]
val replication = sparkConf.get(STAGING_FILE_REPLICATION).map(_.toShort)
.getOrElse(fs.getDefaultReplication(destDir))
val localResources = HashMap[String, LocalResource]()
FileSystem.mkdirs(fs, destDir, new FsPermission(STAGING_DIR_PERMISSION))
val statCache: Map[URI, FileStatus] = HashMap[URI, FileStatus]()
val symlinkCache: Map[URI, Path] = HashMap[URI, Path]()
def addDistributedUri(uri: URI): Boolean = {
val uriStr = uri.toString()
val fileName = new File(uri.getPath).getName
if (distributedUris.contains(uriStr)) {
logWarning(s"Same path resource $uri added multiple times to distributed cache.")
false
} else if (distributedNames.contains(fileName)) {
logWarning(s"Same name resource $uri added multiple times to distributed cache")
false
} else {
distributedUris += uriStr
distributedNames += fileName
true
}
}
/**
* Distribute a file to the cluster.
*
* If the file's path is a "local:" URI, it's actually not distributed. Other files are copied
* to HDFS (if not already there) and added to the application's distributed cache.
*
* @param path URI of the file to distribute.
* @param resType Type of resource being distributed.
* @param destName Name of the file in the distributed cache.
* @param targetDir Subdirectory where to place the file.
* @param appMasterOnly Whether to distribute only to the AM.
* @return A 2-tuple. First item is whether the file is a "local:" URI. Second item is the
* localized path for non-local paths, or the input `path` for local paths.
* The localized path will be null if the URI has already been added to the cache.
*/
def distribute(
path: String,
resType: LocalResourceType = LocalResourceType.FILE,
destName: Option[String] = None,
targetDir: Option[String] = None,
appMasterOnly: Boolean = false): (Boolean, String) = {
val trimmedPath = path.trim()
val localURI = Utils.resolveURI(trimmedPath)
if (localURI.getScheme != LOCAL_SCHEME) {
if (addDistributedUri(localURI)) {
val localPath = getQualifiedLocalPath(localURI, hadoopConf)
val linkname = targetDir.map(_ + "/").getOrElse("") +
destName.orElse(Option(localURI.getFragment())).getOrElse(localPath.getName())
val destPath = copyFileToRemote(destDir, localPath, replication, symlinkCache)
val destFs = FileSystem.get(destPath.toUri(), hadoopConf)
distCacheMgr.addResource(
destFs, hadoopConf, destPath, localResources, resType, linkname, statCache,
appMasterOnly = appMasterOnly)
(false, linkname)
} else {
(false, null)
}
} else {
(true, trimmedPath)
}
}
// If we passed in a keytab, make sure we copy the keytab to the staging directory on
// HDFS, and setup the relevant environment vars, so the AM can login again.
if (loginFromKeytab) {
logInfo("To enable the AM to login from keytab, credentials are being copied over to the AM" +
" via the YARN Secure Distributed Cache.")
val (_, localizedPath) = distribute(keytab,
destName = Some(amKeytabFileName),
appMasterOnly = true)
require(localizedPath != null, "Keytab file already distributed.")
}
/**
* Add Spark to the cache. There are two settings that control what files to add to the cache:
* - if a Spark archive is defined, use the archive. The archive is expected to contain
* jar files at its root directory.
* - if a list of jars is provided, filter the non-local ones, resolve globs, and
* add the found files to the cache.
*
* Note that the archive cannot be a "local" URI. If none of the above settings are found,
* then upload all files found in $SPARK_HOME/jars.
*/
val sparkArchive = sparkConf.get(SPARK_ARCHIVE)
if (sparkArchive.isDefined) {
val archive = sparkArchive.get
require(!isLocalUri(archive), s"${SPARK_ARCHIVE.key} cannot be a local URI.")
distribute(Utils.resolveURI(archive).toString,
resType = LocalResourceType.ARCHIVE,
destName = Some(LOCALIZED_LIB_DIR))
} else {
sparkConf.get(SPARK_JARS) match {
case Some(jars) =>
// Break the list of jars to upload, and resolve globs.
val localJars = new ArrayBuffer[String]()
jars.foreach { jar =>
if (!isLocalUri(jar)) {
val path = getQualifiedLocalPath(Utils.resolveURI(jar), hadoopConf)
val pathFs = FileSystem.get(path.toUri(), hadoopConf)
pathFs.globStatus(path).filter(_.isFile()).foreach { entry =>
val uri = entry.getPath().toUri()
statCache.update(uri, entry)
distribute(uri.toString(), targetDir = Some(LOCALIZED_LIB_DIR))
}
} else {
localJars += jar
}
}
// Propagate the local URIs to the containers using the configuration.
sparkConf.set(SPARK_JARS, localJars)
case None =>
// No configuration, so fall back to uploading local jar files.
logWarning(s"Neither ${SPARK_JARS.key} nor ${SPARK_ARCHIVE.key} is set, falling back " +
"to uploading libraries under SPARK_HOME.")
val jarsDir = new File(YarnCommandBuilderUtils.findJarsDir(
sparkConf.getenv("SPARK_HOME")))
val jarsArchive = File.createTempFile(LOCALIZED_LIB_DIR, ".zip",
new File(Utils.getLocalDir(sparkConf)))
val jarsStream = new ZipOutputStream(new FileOutputStream(jarsArchive))
try {
jarsStream.setLevel(0)
jarsDir.listFiles().foreach { f =>
if (f.isFile && f.getName.toLowerCase(Locale.ROOT).endsWith(".jar") && f.canRead) {
jarsStream.putNextEntry(new ZipEntry(f.getName))
Files.copy(f, jarsStream)
jarsStream.closeEntry()
}
}
} finally {
jarsStream.close()
}
distribute(jarsArchive.toURI.getPath,
resType = LocalResourceType.ARCHIVE,
destName = Some(LOCALIZED_LIB_DIR))
jarsArchive.delete()
}
}
/**
* Copy user jar to the distributed cache if their scheme is not "local".
* Otherwise, set the corresponding key in our SparkConf to handle it downstream.
*/
Option(args.userJar).filter(_.trim.nonEmpty).foreach { jar =>
val (isLocal, localizedPath) = distribute(jar, destName = Some(APP_JAR_NAME))
if (isLocal) {
require(localizedPath != null, s"Path $jar already distributed")
// If the resource is intended for local use only, handle this downstream
// by setting the appropriate property
sparkConf.set(APP_JAR, localizedPath)
}
}
/**
* Do the same for any additional resources passed in through ClientArguments.
* Each resource category is represented by a 3-tuple of:
* (1) comma separated list of resources in this category,
* (2) resource type, and
* (3) whether to add these resources to the classpath
*/
val cachedSecondaryJarLinks = ListBuffer.empty[String]
List(
(sparkConf.get(JARS_TO_DISTRIBUTE), LocalResourceType.FILE, true),
(sparkConf.get(FILES_TO_DISTRIBUTE), LocalResourceType.FILE, false),
(sparkConf.get(ARCHIVES_TO_DISTRIBUTE), LocalResourceType.ARCHIVE, false)
).foreach { case (flist, resType, addToClasspath) =>
flist.foreach { file =>
val (_, localizedPath) = distribute(file, resType = resType)
// If addToClassPath, we ignore adding jar multiple times to distributed cache.
if (addToClasspath) {
if (localizedPath != null) {
cachedSecondaryJarLinks += localizedPath
}
} else {
if (localizedPath == null) {
throw new IllegalArgumentException(s"Attempt to add ($file) multiple times" +
" to the distributed cache.")
}
}
}
}
if (cachedSecondaryJarLinks.nonEmpty) {
sparkConf.set(SECONDARY_JARS, cachedSecondaryJarLinks)
}
if (isClusterMode && args.primaryPyFile != null) {
distribute(args.primaryPyFile, appMasterOnly = true)
}
pySparkArchives.foreach { f => distribute(f) }
// The python files list needs to be treated specially. All files that are not an
// archive need to be placed in a subdirectory that will be added to PYTHONPATH.
sparkConf.get(PY_FILES).foreach { f =>
val targetDir = if (f.endsWith(".py")) Some(LOCALIZED_PYTHON_DIR) else None
distribute(f, targetDir = targetDir)
}
// Update the configuration with all the distributed files, minus the conf archive. The
// conf archive will be handled by the AM differently so that we avoid having to send
// this configuration by other means. See SPARK-14602 for one reason of why this is needed.
distCacheMgr.updateConfiguration(sparkConf)
// Upload the conf archive to HDFS manually, and record its location in the configuration.
// This will allow the AM to know where the conf archive is in HDFS, so that it can be
// distributed to the containers.
//
// This code forces the archive to be copied, so that unit tests pass (since in that case both
// file systems are the same and the archive wouldn't normally be copied). In most (all?)
// deployments, the archive would be copied anyway, since it's a temp file in the local file
// system.
val remoteConfArchivePath = new Path(destDir, LOCALIZED_CONF_ARCHIVE)
val remoteFs = FileSystem.get(remoteConfArchivePath.toUri(), hadoopConf)
sparkConf.set(CACHED_CONF_ARCHIVE, remoteConfArchivePath.toString())
val localConfArchive = new Path(createConfArchive().toURI())
copyFileToRemote(destDir, localConfArchive, replication, symlinkCache, force = true,
destName = Some(LOCALIZED_CONF_ARCHIVE))
// Manually add the config archive to the cache manager so that the AM is launched with
// the proper files set up.
distCacheMgr.addResource(
remoteFs, hadoopConf, remoteConfArchivePath, localResources, LocalResourceType.ARCHIVE,
LOCALIZED_CONF_DIR, statCache, appMasterOnly = false)
// Clear the cache-related entries from the configuration to avoid them polluting the
// UI's environment page. This works for client mode; for cluster mode, this is handled
// by the AM.
CACHE_CONFIGS.foreach(sparkConf.remove)
localResources
}
/**
* Create an archive with the config files for distribution.
*
* These will be used by AM and executors. The files are zipped and added to the job as an
* archive, so that YARN will explode it when distributing to AM and executors. This directory
* is then added to the classpath of AM and executor process, just to make sure that everybody
* is using the same default config.
*
* This follows the order of precedence set by the startup scripts, in which HADOOP_CONF_DIR
* shows up in the classpath before YARN_CONF_DIR.
*
* Currently this makes a shallow copy of the conf directory. If there are cases where a
* Hadoop config directory contains subdirectories, this code will have to be fixed.
*
* The archive also contains some Spark configuration. Namely, it saves the contents of
* SparkConf in a file to be loaded by the AM process.
*/
private def createConfArchive(): File = {
val hadoopConfFiles = new HashMap[String, File]()
// Uploading $SPARK_CONF_DIR/log4j.properties file to the distributed cache to make sure that
// the executors will use the latest configurations instead of the default values. This is
// required when user changes log4j.properties directly to set the log configurations. If
// configuration file is provided through --files then executors will be taking configurations
// from --files instead of $SPARK_CONF_DIR/log4j.properties.
// Also uploading metrics.properties to distributed cache if exists in classpath.
// If user specify this file using --files then executors will use the one
// from --files instead.
for { prop <- Seq("log4j.properties", "metrics.properties")
url <- Option(Utils.getContextOrSparkClassLoader.getResource(prop))
if url.getProtocol == "file" } {
hadoopConfFiles(prop) = new File(url.getPath)
}
Seq("HADOOP_CONF_DIR", "YARN_CONF_DIR").foreach { envKey =>
sys.env.get(envKey).foreach { path =>
val dir = new File(path)
if (dir.isDirectory()) {
val files = dir.listFiles()
if (files == null) {
logWarning("Failed to list files under directory " + dir)
} else {
files.foreach { file =>
if (file.isFile && !hadoopConfFiles.contains(file.getName())) {
hadoopConfFiles(file.getName()) = file
}
}
}
}
}
}
val confArchive = File.createTempFile(LOCALIZED_CONF_DIR, ".zip",
new File(Utils.getLocalDir(sparkConf)))
val confStream = new ZipOutputStream(new FileOutputStream(confArchive))
try {
confStream.setLevel(0)
hadoopConfFiles.foreach { case (name, file) =>
if (file.canRead()) {
confStream.putNextEntry(new ZipEntry(name))
Files.copy(file, confStream)
confStream.closeEntry()
}
}
// Save Spark configuration to a file in the archive.
val props = new Properties()
sparkConf.getAll.foreach { case (k, v) => props.setProperty(k, v) }
// Override spark.yarn.key to point to the location in distributed cache which will be used
// by AM.
Option(amKeytabFileName).foreach { k => props.setProperty(KEYTAB.key, k) }
confStream.putNextEntry(new ZipEntry(SPARK_CONF_FILE))
val writer = new OutputStreamWriter(confStream, StandardCharsets.UTF_8)
props.store(writer, "Spark configuration.")
writer.flush()
confStream.closeEntry()
} finally {
confStream.close()
}
confArchive
}
/**
* Set up the environment for launching our ApplicationMaster container.
*/
private def setupLaunchEnv(
stagingDirPath: Path,
pySparkArchives: Seq[String]): HashMap[String, String] = {
logInfo("Setting up the launch environment for our AM container")
val env = new HashMap[String, String]()
populateClasspath(args, yarnConf, sparkConf, env, sparkConf.get(DRIVER_CLASS_PATH))
env("SPARK_YARN_MODE") = "true"
env("SPARK_YARN_STAGING_DIR") = stagingDirPath.toString
env("SPARK_USER") = UserGroupInformation.getCurrentUser().getShortUserName()
if (loginFromKeytab) {
val credentialsFile = "credentials-" + UUID.randomUUID().toString
sparkConf.set(CREDENTIALS_FILE_PATH, new Path(stagingDirPath, credentialsFile).toString)
logInfo(s"Credentials file set to: $credentialsFile")
}
// Pick up any environment variables for the AM provided through spark.yarn.appMasterEnv.*
val amEnvPrefix = "spark.yarn.appMasterEnv."
sparkConf.getAll
.filter { case (k, v) => k.startsWith(amEnvPrefix) }
.map { case (k, v) => (k.substring(amEnvPrefix.length), v) }
.foreach { case (k, v) => YarnSparkHadoopUtil.addPathToEnvironment(env, k, v) }
// If pyFiles contains any .py files, we need to add LOCALIZED_PYTHON_DIR to the PYTHONPATH
// of the container processes too. Add all non-.py files directly to PYTHONPATH.
//
// NOTE: the code currently does not handle .py files defined with a "local:" scheme.
val pythonPath = new ListBuffer[String]()
val (pyFiles, pyArchives) = sparkConf.get(PY_FILES).partition(_.endsWith(".py"))
if (pyFiles.nonEmpty) {
pythonPath += buildPath(Environment.PWD.$$(), LOCALIZED_PYTHON_DIR)
}
(pySparkArchives ++ pyArchives).foreach { path =>
val uri = Utils.resolveURI(path)
if (uri.getScheme != LOCAL_SCHEME) {
pythonPath += buildPath(Environment.PWD.$$(), new Path(uri).getName())
} else {
pythonPath += uri.getPath()
}
}
// Finally, update the Spark config to propagate PYTHONPATH to the AM and executors.
if (pythonPath.nonEmpty) {
val pythonPathStr = (sys.env.get("PYTHONPATH") ++ pythonPath)
.mkString(ApplicationConstants.CLASS_PATH_SEPARATOR)
env("PYTHONPATH") = pythonPathStr
sparkConf.setExecutorEnv("PYTHONPATH", pythonPathStr)
}
if (isClusterMode) {
// propagate PYSPARK_DRIVER_PYTHON and PYSPARK_PYTHON to driver in cluster mode
Seq("PYSPARK_DRIVER_PYTHON", "PYSPARK_PYTHON").foreach { envname =>
if (!env.contains(envname)) {
sys.env.get(envname).foreach(env(envname) = _)
}
}
sys.env.get("PYTHONHASHSEED").foreach(env.put("PYTHONHASHSEED", _))
}
sys.env.get(ENV_DIST_CLASSPATH).foreach { dcp =>
env(ENV_DIST_CLASSPATH) = dcp
}
env
}
/**
* Set up a ContainerLaunchContext to launch our ApplicationMaster container.
* This sets up the launch environment, java options, and the command for launching the AM.
*/
private def createContainerLaunchContext(newAppResponse: GetNewApplicationResponse)
: ContainerLaunchContext = {
logInfo("Setting up container launch context for our AM")
val appId = newAppResponse.getApplicationId
val appStagingDirPath = new Path(appStagingBaseDir, getAppStagingDir(appId))
val pySparkArchives =
if (sparkConf.get(IS_PYTHON_APP)) {
findPySparkArchives()
} else {
Nil
}
val launchEnv = setupLaunchEnv(appStagingDirPath, pySparkArchives)
val localResources = prepareLocalResources(appStagingDirPath, pySparkArchives)
val amContainer = Records.newRecord(classOf[ContainerLaunchContext])
amContainer.setLocalResources(localResources.asJava)
amContainer.setEnvironment(launchEnv.asJava)
val javaOpts = ListBuffer[String]()
// Set the environment variable through a command prefix
// to append to the existing value of the variable
var prefixEnv: Option[String] = None
// Add Xmx for AM memory
javaOpts += "-Xmx" + amMemory + "m"
val tmpDir = new Path(Environment.PWD.$$(), YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR)
javaOpts += "-Djava.io.tmpdir=" + tmpDir
// TODO: Remove once cpuset version is pushed out.
// The context is, default gc for server class machines ends up using all cores to do gc -
// hence if there are multiple containers in same node, Spark GC affects all other containers'
// performance (which can be that of other Spark containers)
// Instead of using this, rely on cpusets by YARN to enforce "proper" Spark behavior in
// multi-tenant environments. Not sure how default Java GC behaves if it is limited to subset
// of cores on a node.
val useConcurrentAndIncrementalGC = launchEnv.get("SPARK_USE_CONC_INCR_GC").exists(_.toBoolean)
if (useConcurrentAndIncrementalGC) {
// In our expts, using (default) throughput collector has severe perf ramifications in
// multi-tenant machines
javaOpts += "-XX:+UseConcMarkSweepGC"
javaOpts += "-XX:MaxTenuringThreshold=31"
javaOpts += "-XX:SurvivorRatio=8"
javaOpts += "-XX:+CMSIncrementalMode"
javaOpts += "-XX:+CMSIncrementalPacing"
javaOpts += "-XX:CMSIncrementalDutyCycleMin=0"
javaOpts += "-XX:CMSIncrementalDutyCycle=10"
}
// Include driver-specific java options if we are launching a driver
if (isClusterMode) {
sparkConf.get(DRIVER_JAVA_OPTIONS).foreach { opts =>
javaOpts ++= Utils.splitCommandString(opts).map(YarnSparkHadoopUtil.escapeForShell)
}
val libraryPaths = Seq(sparkConf.get(DRIVER_LIBRARY_PATH),
sys.props.get("spark.driver.libraryPath")).flatten
if (libraryPaths.nonEmpty) {
prefixEnv = Some(getClusterPath(sparkConf, Utils.libraryPathEnvPrefix(libraryPaths)))
}
if (sparkConf.get(AM_JAVA_OPTIONS).isDefined) {
logWarning(s"${AM_JAVA_OPTIONS.key} will not take effect in cluster mode")
}
} else {
// Validate and include yarn am specific java options in yarn-client mode.
sparkConf.get(AM_JAVA_OPTIONS).foreach { opts =>
if (opts.contains("-Dspark")) {
val msg = s"${AM_JAVA_OPTIONS.key} is not allowed to set Spark options (was '$opts')."
throw new SparkException(msg)
}
if (opts.contains("-Xmx")) {
val msg = s"${AM_JAVA_OPTIONS.key} is not allowed to specify max heap memory settings " +
s"(was '$opts'). Use spark.yarn.am.memory instead."
throw new SparkException(msg)
}
javaOpts ++= Utils.splitCommandString(opts).map(YarnSparkHadoopUtil.escapeForShell)
}
sparkConf.get(AM_LIBRARY_PATH).foreach { paths =>
prefixEnv = Some(getClusterPath(sparkConf, Utils.libraryPathEnvPrefix(Seq(paths))))
}
}
// For log4j configuration to reference
javaOpts += ("-Dspark.yarn.app.container.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR)
val userClass =
if (isClusterMode) {
Seq("--class", YarnSparkHadoopUtil.escapeForShell(args.userClass))
} else {
Nil
}
val userJar =
if (args.userJar != null) {
Seq("--jar", args.userJar)
} else {
Nil
}
val primaryPyFile =
if (isClusterMode && args.primaryPyFile != null) {
Seq("--primary-py-file", new Path(args.primaryPyFile).getName())
} else {
Nil
}
val primaryRFile =
if (args.primaryRFile != null) {
Seq("--primary-r-file", args.primaryRFile)
} else {
Nil
}
val amClass =
if (isClusterMode) {
Utils.classForName("org.apache.spark.deploy.yarn.ApplicationMaster").getName
} else {
Utils.classForName("org.apache.spark.deploy.yarn.ExecutorLauncher").getName
}
if (args.primaryRFile != null && args.primaryRFile.endsWith(".R")) {
args.userArgs = ArrayBuffer(args.primaryRFile) ++ args.userArgs
}
val userArgs = args.userArgs.flatMap { arg =>
Seq("--arg", YarnSparkHadoopUtil.escapeForShell(arg))
}
val amArgs =
Seq(amClass) ++ userClass ++ userJar ++ primaryPyFile ++ primaryRFile ++ userArgs ++
Seq("--properties-file", buildPath(Environment.PWD.$$(), LOCALIZED_CONF_DIR, SPARK_CONF_FILE))
// Command for the ApplicationMaster
val commands = prefixEnv ++
Seq(Environment.JAVA_HOME.$$() + "/bin/java", "-server") ++
javaOpts ++ amArgs ++
Seq(
"1>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout",
"2>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr")
// TODO: it would be nicer to just make sure there are no null commands here
val printableCommands = commands.map(s => if (s == null) "null" else s).toList
amContainer.setCommands(printableCommands.asJava)
logDebug("===============================================================================")
logDebug("YARN AM launch context:")
logDebug(s" user class: ${Option(args.userClass).getOrElse("N/A")}")
logDebug(" env:")
launchEnv.foreach { case (k, v) => logDebug(s" $k -> $v") }
logDebug(" resources:")
localResources.foreach { case (k, v) => logDebug(s" $k -> $v")}
logDebug(" command:")
logDebug(s" ${printableCommands.mkString(" ")}")
logDebug("===============================================================================")
// send the acl settings into YARN to control who has access via YARN interfaces
val securityManager = new SecurityManager(sparkConf)
amContainer.setApplicationACLs(
YarnSparkHadoopUtil.getApplicationAclsForYarn(securityManager).asJava)
setupSecurityToken(amContainer)
amContainer
}
def setupCredentials(): Unit = {
loginFromKeytab = sparkConf.contains(PRINCIPAL.key)
if (loginFromKeytab) {
principal = sparkConf.get(PRINCIPAL).get
keytab = sparkConf.get(KEYTAB).orNull
require(keytab != null, "Keytab must be specified when principal is specified.")
logInfo("Attempting to login to the Kerberos" +
s" using principal: $principal and keytab: $keytab")
val f = new File(keytab)
// Generate a file name that can be used for the keytab file, that does not conflict
// with any user file.
amKeytabFileName = f.getName + "-" + UUID.randomUUID().toString
sparkConf.set(PRINCIPAL.key, principal)
}
// Defensive copy of the credentials
credentials = new Credentials(UserGroupInformation.getCurrentUser.getCredentials)
}
/**
* Report the state of an application until it has exited, either successfully or
* due to some failure, then return a pair of the yarn application state (FINISHED, FAILED,
* KILLED, or RUNNING) and the final application state (UNDEFINED, SUCCEEDED, FAILED,
* or KILLED).
*
* @param appId ID of the application to monitor.
* @param returnOnRunning Whether to also return the application state when it is RUNNING.
* @param logApplicationReport Whether to log details of the application report every iteration.
* @return A pair of the yarn application state and the final application state.
*/
def monitorApplication(
appId: ApplicationId,
returnOnRunning: Boolean = false,
logApplicationReport: Boolean = true): (YarnApplicationState, FinalApplicationStatus) = {
val interval = sparkConf.get(REPORT_INTERVAL)
var lastState: YarnApplicationState = null
while (true) {
Thread.sleep(interval)
val report: ApplicationReport =
try {
getApplicationReport(appId)
} catch {
case e: ApplicationNotFoundException =>
logError(s"Application $appId not found.")
cleanupStagingDir(appId)
return (YarnApplicationState.KILLED, FinalApplicationStatus.KILLED)
case NonFatal(e) =>
logError(s"Failed to contact YARN for application $appId.", e)
// Don't necessarily clean up staging dir because status is unknown
return (YarnApplicationState.FAILED, FinalApplicationStatus.FAILED)
}
val state = report.getYarnApplicationState
if (logApplicationReport) {
logInfo(s"Application report for $appId (state: $state)")
// If DEBUG is enabled, log report details every iteration
// Otherwise, log them every time the application changes state
if (log.isDebugEnabled) {
logDebug(formatReportDetails(report))
} else if (lastState != state) {
logInfo(formatReportDetails(report))
}
}
if (lastState != state) {
state match {
case YarnApplicationState.RUNNING =>
reportLauncherState(SparkAppHandle.State.RUNNING)
case YarnApplicationState.FINISHED =>
report.getFinalApplicationStatus match {
case FinalApplicationStatus.FAILED =>
reportLauncherState(SparkAppHandle.State.FAILED)
case FinalApplicationStatus.KILLED =>
reportLauncherState(SparkAppHandle.State.KILLED)
case _ =>
reportLauncherState(SparkAppHandle.State.FINISHED)
}
case YarnApplicationState.FAILED =>
reportLauncherState(SparkAppHandle.State.FAILED)
case YarnApplicationState.KILLED =>
reportLauncherState(SparkAppHandle.State.KILLED)
case _ =>
}
}
if (state == YarnApplicationState.FINISHED ||
state == YarnApplicationState.FAILED ||
state == YarnApplicationState.KILLED) {
cleanupStagingDir(appId)
return (state, report.getFinalApplicationStatus)
}
if (returnOnRunning && state == YarnApplicationState.RUNNING) {
return (state, report.getFinalApplicationStatus)
}
lastState = state
}
// Never reached, but keeps compiler happy
throw new SparkException("While loop is depleted! This should never happen...")
}
private def formatReportDetails(report: ApplicationReport): String = {
val details = Seq[(String, String)](
("client token", getClientToken(report)),
("diagnostics", report.getDiagnostics),
("ApplicationMaster host", report.getHost),
("ApplicationMaster RPC port", report.getRpcPort.toString),
("queue", report.getQueue),
("start time", report.getStartTime.toString),
("final status", report.getFinalApplicationStatus.toString),
("tracking URL", report.getTrackingUrl),
("user", report.getUser)
)
// Use more loggable format if value is null or empty
details.map { case (k, v) =>
val newValue = Option(v).filter(_.nonEmpty).getOrElse("N/A")
s"\\n\\t $k: $newValue"
}.mkString("")
}
/**
* Submit an application to the ResourceManager.
* If set spark.yarn.submit.waitAppCompletion to true, it will stay alive
* reporting the application's status until the application has exited for any reason.
* Otherwise, the client process will exit after submission.
* If the application finishes with a failed, killed, or undefined status,
* throw an appropriate SparkException.
*/
def run(): Unit = {
this.appId = submitApplication()
if (!launcherBackend.isConnected() && fireAndForget) {
val report = getApplicationReport(appId)
val state = report.getYarnApplicationState
logInfo(s"Application report for $appId (state: $state)")
logInfo(formatReportDetails(report))
if (state == YarnApplicationState.FAILED || state == YarnApplicationState.KILLED) {
throw new SparkException(s"Application $appId finished with status: $state")
}
} else {
val (yarnApplicationState, finalApplicationStatus) = monitorApplication(appId)
if (yarnApplicationState == YarnApplicationState.FAILED ||
finalApplicationStatus == FinalApplicationStatus.FAILED) {
throw new SparkException(s"Application $appId finished with failed status")
}
if (yarnApplicationState == YarnApplicationState.KILLED ||
finalApplicationStatus == FinalApplicationStatus.KILLED) {
throw new SparkException(s"Application $appId is killed")
}
if (finalApplicationStatus == FinalApplicationStatus.UNDEFINED) {
throw new SparkException(s"The final status of application $appId is undefined")
}
}
}
private def findPySparkArchives(): Seq[String] = {
sys.env.get("PYSPARK_ARCHIVES_PATH")
.map(_.split(",").toSeq)
.getOrElse {
val pyLibPath = Seq(sys.env("SPARK_HOME"), "python", "lib").mkString(File.separator)
val pyArchivesFile = new File(pyLibPath, "pyspark.zip")
require(pyArchivesFile.exists(),
s"$pyArchivesFile not found; cannot run pyspark application in YARN mode.")
val py4jFile = new File(pyLibPath, "py4j-0.10.4-src.zip")
require(py4jFile.exists(),
s"$py4jFile not found; cannot run pyspark application in YARN mode.")
Seq(pyArchivesFile.getAbsolutePath(), py4jFile.getAbsolutePath())
}
}
}
private object Client extends Logging {
def main(argStrings: Array[String]) {
if (!sys.props.contains("SPARK_SUBMIT")) {
logWarning("WARNING: This client is deprecated and will be removed in a " +
"future version of Spark. Use ./bin/spark-submit with \\"--master yarn\\"")
}
// Set an env variable indicating we are running in YARN mode.
// Note that any env variable with the SPARK_ prefix gets propagated to all (remote) processes
System.setProperty("SPARK_YARN_MODE", "true")
val sparkConf = new SparkConf
// SparkSubmit would use yarn cache to distribute files & jars in yarn mode,
// so remove them from sparkConf here for yarn mode.
sparkConf.remove("spark.jars")
sparkConf.remove("spark.files")
val args = new ClientArguments(argStrings)
new Client(args, sparkConf).run()
}
// Alias for the user jar
val APP_JAR_NAME: String = "__app__.jar"
// URI scheme that identifies local resources
val LOCAL_SCHEME = "local"
// Staging directory for any temporary jars or files
val SPARK_STAGING: String = ".sparkStaging"
// Staging directory is private! -> rwx------
val STAGING_DIR_PERMISSION: FsPermission =
FsPermission.createImmutable(Integer.parseInt("700", 8).toShort)
// App files are world-wide readable and owner writable -> rw-r--r--
val APP_FILE_PERMISSION: FsPermission =
FsPermission.createImmutable(Integer.parseInt("644", 8).toShort)
// Distribution-defined classpath to add to processes
val ENV_DIST_CLASSPATH = "SPARK_DIST_CLASSPATH"
// Subdirectory where the user's Spark and Hadoop config files will be placed.
val LOCALIZED_CONF_DIR = "__spark_conf__"
// File containing the conf archive in the AM. See prepareLocalResources().
val LOCALIZED_CONF_ARCHIVE = LOCALIZED_CONF_DIR + ".zip"
// Name of the file in the conf archive containing Spark configuration.
val SPARK_CONF_FILE = "__spark_conf__.properties"
// Subdirectory where the user's python files (not archives) will be placed.
val LOCALIZED_PYTHON_DIR = "__pyfiles__"
// Subdirectory where Spark libraries will be placed.
val LOCALIZED_LIB_DIR = "__spark_libs__"
/**
* Return the path to the given application's staging directory.
*/
private def getAppStagingDir(appId: ApplicationId): String = {
buildPath(SPARK_STAGING, appId.toString())
}
/**
* Populate the classpath entry in the given environment map with any application
* classpath specified through the Hadoop and Yarn configurations.
*/
private[yarn] def populateHadoopClasspath(conf: Configuration, env: HashMap[String, String])
: Unit = {
val classPathElementsToAdd = getYarnAppClasspath(conf) ++ getMRAppClasspath(conf)
classPathElementsToAdd.foreach { c =>
YarnSparkHadoopUtil.addPathToEnvironment(env, Environment.CLASSPATH.name, c.trim)
}
}
private def getYarnAppClasspath(conf: Configuration): Seq[String] =
Option(conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH)) match {
case Some(s) => s.toSeq
case None => getDefaultYarnApplicationClasspath
}
private def getMRAppClasspath(conf: Configuration): Seq[String] =
Option(conf.getStrings("mapreduce.application.classpath")) match {
case Some(s) => s.toSeq
case None => getDefaultMRApplicationClasspath
}
private[yarn] def getDefaultYarnApplicationClasspath: Seq[String] =
YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH.toSeq
private[yarn] def getDefaultMRApplicationClasspath: Seq[String] =
StringUtils.getStrings(MRJobConfig.DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH).toSeq
/**
* Populate the classpath entry in the given environment map.
*
* User jars are generally not added to the JVM's system classpath; those are handled by the AM
* and executor backend. When the deprecated `spark.yarn.user.classpath.first` is used, user jars
* are included in the system classpath, though. The extra class path and other uploaded files are
* always made available through the system class path.
*
* @param args Client arguments (when starting the AM) or null (when starting executors).
*/
private[yarn] def populateClasspath(
args: ClientArguments,
conf: Configuration,
sparkConf: SparkConf,
env: HashMap[String, String],
extraClassPath: Option[String] = None): Unit = {
extraClassPath.foreach { cp =>
addClasspathEntry(getClusterPath(sparkConf, cp), env)
}
addClasspathEntry(Environment.PWD.$$(), env)
addClasspathEntry(Environment.PWD.$$() + Path.SEPARATOR + LOCALIZED_CONF_DIR, env)
if (sparkConf.get(USER_CLASS_PATH_FIRST)) {
// In order to properly add the app jar when the user classpath is first, we have to
// handle the main jar separately so that the right value is passed into
// addFileToClasspath.
val mainJar =
if (args != null) {
getMainJarUri(Option(args.userJar))
} else {
getMainJarUri(sparkConf.get(APP_JAR))
}
mainJar.foreach(addFileToClasspath(sparkConf, conf, _, APP_JAR_NAME, env))
val secondaryJars =
if (args != null) {
getSecondaryJarUris(Option(sparkConf.get(JARS_TO_DISTRIBUTE)))
} else {
getSecondaryJarUris(sparkConf.get(SECONDARY_JARS))
}
secondaryJars.foreach { x =>
addFileToClasspath(sparkConf, conf, x, null, env)
}
}
// Add the Spark jars to the classpath, depending on how they were distributed.
addClasspathEntry(buildPath(Environment.PWD.$$(), LOCALIZED_LIB_DIR, "*"), env)
if (sparkConf.get(SPARK_ARCHIVE).isEmpty) {
sparkConf.get(SPARK_JARS).foreach { jars =>
jars.filter(isLocalUri).foreach { jar =>
addClasspathEntry(getClusterPath(sparkConf, jar), env)
}
}
}
populateHadoopClasspath(conf, env)
sys.env.get(ENV_DIST_CLASSPATH).foreach { cp =>
addClasspathEntry(getClusterPath(sparkConf, cp), env)
}
}
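  // Illustrative summary (not part of the original source): for a typical submission the
  // CLASSPATH entries are appended roughly in this order: the extra class path (if any),
  // $PWD, $PWD/<LOCALIZED_CONF_DIR>, the user jars (only when USER_CLASS_PATH_FIRST is
  // set), $PWD/<LOCALIZED_LIB_DIR>/*, any "local:" jars from SPARK_JARS when no
  // SPARK_ARCHIVE is configured, the Hadoop/YARN application classpath, and finally the
  // entries taken from ENV_DIST_CLASSPATH.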
/**
* Returns a list of URIs representing the user classpath.
*
* @param conf Spark configuration.
*/
def getUserClasspath(conf: SparkConf): Array[URI] = {
val mainUri = getMainJarUri(conf.get(APP_JAR))
val secondaryUris = getSecondaryJarUris(conf.get(SECONDARY_JARS))
(mainUri ++ secondaryUris).toArray
}
private def getMainJarUri(mainJar: Option[String]): Option[URI] = {
mainJar.flatMap { path =>
val uri = Utils.resolveURI(path)
if (uri.getScheme == LOCAL_SCHEME) Some(uri) else None
}.orElse(Some(new URI(APP_JAR_NAME)))
}
private def getSecondaryJarUris(secondaryJars: Option[Seq[String]]): Seq[URI] = {
secondaryJars.getOrElse(Nil).map(new URI(_))
}
/**
* Adds the given path to the classpath, handling "local:" URIs correctly.
*
* If an alternate name for the file is given, and it's not a "local:" file, the alternate
* name will be added to the classpath (relative to the job's work directory).
*
   * If it is not a "local:" file and no alternate name is given, the link name (the URI
   * fragment, or the file name when there is no fragment) will be added to the classpath.
*
* @param conf Spark configuration.
* @param hadoopConf Hadoop configuration.
* @param uri URI to add to classpath (optional).
* @param fileName Alternate name for the file (optional).
* @param env Map holding the environment variables.
*/
private def addFileToClasspath(
conf: SparkConf,
hadoopConf: Configuration,
uri: URI,
fileName: String,
env: HashMap[String, String]): Unit = {
if (uri != null && uri.getScheme == LOCAL_SCHEME) {
addClasspathEntry(getClusterPath(conf, uri.getPath), env)
} else if (fileName != null) {
addClasspathEntry(buildPath(Environment.PWD.$$(), fileName), env)
} else if (uri != null) {
val localPath = getQualifiedLocalPath(uri, hadoopConf)
val linkName = Option(uri.getFragment()).getOrElse(localPath.getName())
addClasspathEntry(buildPath(Environment.PWD.$$(), linkName), env)
}
}
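  // Illustrative only (hypothetical URIs): a "local:/opt/libs/dep.jar" entry is added
  // verbatim after gateway-path translation; otherwise, when an alternate fileName is
  // given, $PWD/<fileName> is added; otherwise the link name is used, so a file
  // distributed as "hdfs:///user/foo/dep.jar#renamed.jar" ends up as $PWD/renamed.jar.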
/**
* Add the given path to the classpath entry of the given environment map.
* If the classpath is already set, this appends the new path to the existing classpath.
*/
private def addClasspathEntry(path: String, env: HashMap[String, String]): Unit =
YarnSparkHadoopUtil.addPathToEnvironment(env, Environment.CLASSPATH.name, path)
/**
* Returns the path to be sent to the NM for a path that is valid on the gateway.
*
* This method uses two configuration values:
*
* - spark.yarn.config.gatewayPath: a string that identifies a portion of the input path that may
* only be valid in the gateway node.
* - spark.yarn.config.replacementPath: a string with which to replace the gateway path. This may
* contain, for example, env variable references, which will be expanded by the NMs when
* starting containers.
*
* If either config is not available, the input path is returned.
*/
def getClusterPath(conf: SparkConf, path: String): String = {
val localPath = conf.get(GATEWAY_ROOT_PATH)
val clusterPath = conf.get(REPLACEMENT_ROOT_PATH)
if (localPath != null && clusterPath != null) {
path.replace(localPath, clusterPath)
} else {
path
}
}
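  // Illustrative only (hypothetical config values): with
  //   spark.yarn.config.gatewayPath     = /opt/spark-gateway
  //   spark.yarn.config.replacementPath = {{SPARK_HOME_ON_NM}}
  // getClusterPath(conf, "/opt/spark-gateway/jars/dep.jar") returns
  // "{{SPARK_HOME_ON_NM}}/jars/dep.jar", and the NodeManager expands the reference
  // when it starts the container.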
/**
* Return whether the two file systems are the same.
*/
private def compareFs(srcFs: FileSystem, destFs: FileSystem): Boolean = {
val srcUri = srcFs.getUri()
val dstUri = destFs.getUri()
if (srcUri.getScheme() == null || srcUri.getScheme() != dstUri.getScheme()) {
return false
}
var srcHost = srcUri.getHost()
var dstHost = dstUri.getHost()
// In HA or when using viewfs, the host part of the URI may not actually be a host, but the
// name of the HDFS namespace. Those names won't resolve, so avoid even trying if they
// match.
if (srcHost != null && dstHost != null && srcHost != dstHost) {
try {
srcHost = InetAddress.getByName(srcHost).getCanonicalHostName()
dstHost = InetAddress.getByName(dstHost).getCanonicalHostName()
} catch {
case e: UnknownHostException =>
return false
}
}
Objects.equal(srcHost, dstHost) && srcUri.getPort() == dstUri.getPort()
}
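  // Illustrative only (hypothetical hosts): "hdfs://nn-alias.example.com:8020" and
  // "hdfs://namenode.example.com:8020" compare as the same filesystem once both hosts
  // resolve to the same canonical name, whereas URIs that differ in scheme (e.g. hdfs
  // vs s3a) are rejected before any host lookup.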
/**
* Given a local URI, resolve it and return a qualified local path that corresponds to the URI.
* This is used for preparing local resources to be included in the container launch context.
*/
private def getQualifiedLocalPath(localURI: URI, hadoopConf: Configuration): Path = {
val qualifiedURI =
if (localURI.getScheme == null) {
// If not specified, assume this is in the local filesystem to keep the behavior
// consistent with that of Hadoop
new URI(FileSystem.getLocal(hadoopConf).makeQualified(new Path(localURI)).toString)
} else {
localURI
}
new Path(qualifiedURI)
}
/**
* Whether to consider jars provided by the user to have precedence over the Spark jars when
* loading user classes.
*/
def isUserClassPathFirst(conf: SparkConf, isDriver: Boolean): Boolean = {
if (isDriver) {
conf.get(DRIVER_USER_CLASS_PATH_FIRST)
} else {
conf.get(EXECUTOR_USER_CLASS_PATH_FIRST)
}
}
/**
* Joins all the path components using Path.SEPARATOR.
*/
def buildPath(components: String*): String = {
components.mkString(Path.SEPARATOR)
}
/** Returns whether the URI is a "local:" URI. */
def isLocalUri(uri: String): Boolean = {
uri.startsWith(s"$LOCAL_SCHEME:")
}
}
|
bOOm-X/spark
|
resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
|
Scala
|
apache-2.0
| 60,490 |
object Test extends App {
// expr.isInstanceOf[Singleton] is true iff the expression has a singleton type
// However, expr.asInstanceOf[Singleton] is erased to expr.asInstanceOf[Any] so it never throws
// as discussed in http://docs.scala-lang.org/sips/minutes/2017-12-06-sip-minutes.html
val foo: String = "foo"
assert(foo.isInstanceOf[Singleton])
assert((foo: String).isInstanceOf[Singleton])
foo.asInstanceOf[Singleton]
(foo: String).asInstanceOf[Singleton]
val bar: "foo" = "foo"
assert(bar.isInstanceOf[Singleton])
assert((bar: String).isInstanceOf[Singleton])
bar.asInstanceOf[Singleton]
(bar: String).asInstanceOf[Singleton]
final val baz = "foo"
assert(baz.isInstanceOf[Singleton])
assert((baz: String).isInstanceOf[Singleton])
baz.asInstanceOf[Singleton]
(baz: String).asInstanceOf[Singleton]
assert("foo".isInstanceOf[Singleton])
assert(("foo": String).isInstanceOf[Singleton])
"foo".asInstanceOf[Singleton]
("foo": String).asInstanceOf[Singleton]
val x = 1
val y: x.type = x
assert((y: (x.type with y.type)).isInstanceOf[Singleton])
assert((y: (x.type with Int)).isInstanceOf[Singleton])
type A = x.type
assert((y: A).isInstanceOf[Singleton])
assert(!(null: String).isInstanceOf[Singleton])
}
|
martijnhoekstra/scala
|
test/files/run/sip23-singleton-isas.scala
|
Scala
|
apache-2.0
| 1,268 |
import java.util.Date

abstract class Payable
class CreditCard extends Payable {
val cardNumber: String
val holderName: String // as printed on the card
val expireDate: Date // e.g. 2012-12-31
val securityCode: String // usually the 3 digits on the back of the card
val issuer: String // the bank that issued the card
}
class Visa extends CreditCard
class MasterCard extends CreditCard
class AmericanExpress extends CreditCard
class DebitCard extends Payable {
val cardNumber: String
val holderName: String
val expireDate: Date
val pin: String
}
class ConvenienceCard extends DebitCard
class ElectronicCash extends DebitCard
class UnionPayCard extends DebitCard
trait Billable {
val id: String // globally unique identifier
val price: Int // to make it simple, you can assume the total price = price of the itinerary x number of travelers
val payee: String // which airline to be paid?
}
class PaymentResult {
val success: Boolean
val message: String // indicate what's wrong if success == false
def toXML =
<paymentResult>
<success>{ success }</success>
<message>{ message }</message>
</paymentResult>
}
abstract class PaymentGateway {
val url: String // user must visit this url to complete payments
def pay(item: Billable, visaCard: Visa): PaymentResult = {
// pre-condition
assert( item.price > 0 )
    assert( item.payee is valid ) // no money laundering allowed :)
assert( visaCard is valid ) // check expire date and security code
// processing
// post-condition
assert( visaCard is deducted item.price amount of money )
new PaymentResult // either success or failure
}
def pay(item: Billable, masterCard: MasterCard): PaymentResult = {
// similar pre-/post-condition for all pay(...) methods here and below
}
}
class PayPal extends PaymentGateway {
val url = "http://paypal.yoda.informatik.hs-mannheim.de"
// accept Visa and Master, and ...
def pay(item: Billable, americanExpress: AmericanExpress): PaymentResult
def pay(item: Billable, convenienceCard: ConvenienceCard): PaymentResult
}
class EuroPay extends PaymentGateway {
val url = "http://europay.yoda.informatik.hs-mannheim.de"
// accept Visa and Master, and ...
def pay(item: Billable, electronicCash: ElectronicCash): PaymentResult
}
class Alipay {
val url = "http://alipay.yoda.informatik.hs-mannheim.de"
def pay(item: Billable, visa: Visa): PaymentResult = {
// same pre-condition as PaymentGateway, plus the following one more rule
assert( visa.issuer is a Chinese bank ) // only accept Visa card issued by a Chinese bank
// same post-condition as PaymentGateway
}
  def pay(item: Billable, masterCard: MasterCard): PaymentResult = {
// same pre-condition as PaymentGateway, plus the following one more rule
assert( masterCard.issuer is a Chinese bank ) // only accept MasterCard issued by a Chinese bank
// same post-condition as PaymentGateway
}
def pay(item: Billable, unionPayCard: UnionPayCard): PaymentResult
}
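// Illustrative usage sketch (not part of the design above; `itinerary` and `myVisa` are
// hypothetical values):
//   val gateway: PaymentGateway = new PayPal
//   val result = gateway.pay(itinerary, myVisa)
//   if (!result.success) println(result.message)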
|
jwachter/travel-service
|
docs/reference/payment.scala
|
Scala
|
apache-2.0
| 3,065 |
// Copyright (c) 2011-2015 ScalaMock Contributors (https://github.com/paulbutcher/ScalaMock/graphs/contributors)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package org.scalamock.test.scalatest
import org.scalamock.scalatest.MockFactory
import org.scalamock.test.mockable.TestTrait
import org.scalatest.ParallelTestExecution
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
/**
* Tests for mocks defined in suite scope (i.e. outside test case scope) with predefined expectations
*
* Tests for issue #25
*/
class SuiteScopePresetMockParallelTest extends AnyFlatSpec with Matchers with ParallelTestExecution with MockFactory {
// please note that this test suite mixes in ParallelTestExecution trait
override def newInstance = new SuiteScopePresetMockParallelTest
val mockWithExpectationsPredefined = mock[TestTrait]
(mockWithExpectationsPredefined.oneParamMethod _).expects(0).returning("predefined")
"ScalaTest suite" should "allow to use mock defined suite scope with predefined expectations" in {
(mockWithExpectationsPredefined.oneParamMethod _).expects(1).returning("one")
mockWithExpectationsPredefined.oneParamMethod(0) shouldBe "predefined"
mockWithExpectationsPredefined.oneParamMethod(1) shouldBe "one"
}
it should "keep predefined mock expectations" in {
(mockWithExpectationsPredefined.oneParamMethod _).expects(2).returning("two")
mockWithExpectationsPredefined.oneParamMethod(0) shouldBe "predefined"
mockWithExpectationsPredefined.oneParamMethod(2) shouldBe "two"
}
}
|
paulbutcher/ScalaMock
|
shared/src/test/scala/org/scalamock/test/scalatest/SuiteScopePresetMockParallelTest.scala
|
Scala
|
mit
| 2,607 |
/*
* Copyright [2013] [Antoine comte]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import net.antoinecomte.jugl.challenge2013.Diet._
import org.scalatest._
class DietSpec extends FlatSpec {
"Diet" should "find zero output activities" in {
val in = List(
Activity("coca-light", 1),
Activity("croissant", 180),
Activity("au-travail-a-velo", -113),
Activity("guitar-hero", -181)
)
val out = Seq("croissant", "coca-light", "guitar-hero").sorted
assert(resolve(in).sorted === out)
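    // Illustrative note: the expected names are exactly the activities whose calorie
    // deltas cancel out (180 + 1 - 181 == 0), which is what "zero output" means here.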
}
}
|
antoinecomte/JUGL-Challenge-2013
|
src/test/scala/DietSpec.scala
|
Scala
|
apache-2.0
| 1,077 |
/* Copyright 2009-2013 EPFL, Lausanne
*
* Author: Ravi
* Date: 20.11.2013
**/
import leon.lang._
import leon.collection._
object Heaps {
sealed abstract class Heap {
val rank : BigInt = this match {
case Leaf() => 0
case Node(_, l, r) =>
1 + max(l.rank, r.rank)
}
def content : Set[BigInt] = this match {
case Leaf() => Set[BigInt]()
case Node(v,l,r) => l.content ++ Set(v) ++ r.content
}
}
case class Leaf() extends Heap
case class Node(value:BigInt, left: Heap, right: Heap) extends Heap
def max(i1 : BigInt, i2 : BigInt) = if (i1 >= i2) i1 else i2
def hasHeapProperty(h : Heap) : Boolean = h match {
case Leaf() => true
case Node(v, l, r) =>
( l match {
case Leaf() => true
case n@Node(v2,_,_) => v >= v2 && hasHeapProperty(n)
}) &&
( r match {
case Leaf() => true
case n@Node(v2,_,_) => v >= v2 && hasHeapProperty(n)
})
}
def hasLeftistProperty(h: Heap) : Boolean = h match {
case Leaf() => true
case Node(_,l,r) =>
hasLeftistProperty(l) &&
hasLeftistProperty(r) &&
l.rank >= r.rank
}
def heapSize(t: Heap): BigInt = { t match {
case Leaf() => BigInt(0)
case Node(v, l, r) => heapSize(l) + 1 + heapSize(r)
}} ensuring(_ >= 0)
private def merge(h1: Heap, h2: Heap) : Heap = {
require(
hasLeftistProperty(h1) && hasLeftistProperty(h2) &&
hasHeapProperty(h1) && hasHeapProperty(h2)
)
(h1,h2) match {
case (Leaf(), _) => h1 // FIXME: swapped these cases
case (_, Leaf()) => h2 // FIXME
case (Node(v1, l1, r1), Node(v2, l2, r2)) =>
if(v1 >= v2)
makeN(v1, l1, merge(r1, h2))
else
makeN(v2, l2, merge(h1, r2))
}
} ensuring { res =>
hasLeftistProperty(res) && hasHeapProperty(res) &&
heapSize(h1) + heapSize(h2) == heapSize(res) &&
h1.content ++ h2.content == res.content
}
private def makeN(value: BigInt, left: Heap, right: Heap) : Heap = {
require(
hasLeftistProperty(left) && hasLeftistProperty(right)
)
if(left.rank >= right.rank)
Node(value, left, right)
else
Node(value, right, left)
} ensuring { res =>
hasLeftistProperty(res) }
def insert(element: BigInt, heap: Heap) : Heap = {
require(hasLeftistProperty(heap) && hasHeapProperty(heap))
merge(Node(element, Leaf(), Leaf()), heap)
} ensuring { res =>
hasLeftistProperty(res) && hasHeapProperty(res) &&
heapSize(res) == heapSize(heap) + 1 &&
res.content == heap.content ++ Set(element)
}
def findMax(h: Heap) : Option[BigInt] = {
h match {
case Node(m,_,_) => Some(m)
case Leaf() => None()
}
}
def removeMax(h: Heap) : Heap = {
require(hasLeftistProperty(h) && hasHeapProperty(h))
h match {
case Node(_,l,r) => merge(l, r)
case l => l
}
} ensuring { res =>
hasLeftistProperty(res) && hasHeapProperty(res)
}
}
|
ericpony/scala-examples
|
testcases/repair/Heap/Heap10.scala
|
Scala
|
mit
| 2,989 |
package co.theasi.plotly.writer
import org.json4s._
import co.theasi.plotly._
object SeriesReader {
def fromJson(json: JObject): Series = {
val seriesType = json \\ "type"
seriesType match {
case JString("bar") => barFromJson(json)
case JString("box") => boxFromJson(json)
case JNothing => scatterFromJson(json)
case _ => throw new UnexpectedServerResponse(
s"Unrecognized series type: $seriesType")
}
}
private def barFromJson(json: JObject): Bar[PType, PType] = {
val (xs, ys) = xyFromJson(json)
Bar(xs, ys, BarOptions())
}
private def boxFromJson(json: JObject): Box[PType] = {
val xs = yFromJson(json)
Box(xs, BoxOptions())
}
private def scatterFromJson(json: JObject): Scatter[PType, PType] = {
val (xs, ys) = xyFromJson(json)
Scatter(xs, ys, ScatterOptions())
}
private def yFromJson(json: JObject): List[PType] = {
val JArray(xDataAsJson) = json \\ "y"
columnFromJson(xDataAsJson)
}
private def xyFromJson(json: JObject): (List[PType], List[PType]) = {
val JArray(xDataAsJson) = json \\ "x"
val xs = columnFromJson(xDataAsJson)
val JArray(yDataAsJson) = json \\ "y"
val ys = columnFromJson(yDataAsJson)
(xs, ys)
}
private def columnFromJson(data: List[JValue]): List[PType] =
data.map { jsonToPType _ }
private def jsonToPType[X <: JValue](x: X) = x match {
case JInt(i) => PInt(i.toInt)
case JDouble(d) => PDouble(d)
case JString(s) => PString(s)
}
}
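// Illustrative only (hypothetical JSON): a trace such as {"x": [1, 2], "y": [3.5, "a"]}
// has no "type" member, so fromJson falls back to scatterFromJson and yields
// Scatter(List(PInt(1), PInt(2)), List(PDouble(3.5), PString("a")), ScatterOptions());
// "bar" and "box" values of "type" map to Bar and Box in the same way.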
|
ASIDataScience/scala-plotly-client
|
src/main/scala/co/theasi/plotly/writer/SeriesReader.scala
|
Scala
|
mit
| 1,507 |
package org.globalnames
package parser
package runner.web.models
import spray.json.JsValue
case class NamesRequest(names: Seq[String])
case class NamesResponse(namesJson: Seq[formatters.Summarizer.Summary])
|
GlobalNamesArchitecture/gnparser
|
runner/src/main/scala/org/globalnames/parser/runner/web/models/NamesRequest.scala
|
Scala
|
mit
| 210 |
package im.actor.server.group
import java.time.{ LocalDateTime, ZoneOffset }
import akka.actor.Status
import akka.pattern.pipe
import com.google.protobuf.ByteString
import im.actor.api.rpc.Update
import im.actor.api.rpc.groups._
import im.actor.api.rpc.messaging.ApiServiceMessage
import im.actor.api.rpc.misc.ApiExtension
import im.actor.api.rpc.users.ApiSex
import im.actor.server.ApiConversions._
import im.actor.server.acl.ACLUtils
import im.actor.server.history.HistoryUtils
import im.actor.server.{ persist ⇒ p, models }
import im.actor.server.event.TSEvent
import im.actor.server.file.{ ImageUtils, Avatar }
import im.actor.server.group.GroupErrors._
import im.actor.server.office.PushTexts
import im.actor.server.dialog.group.GroupDialogOperations
import im.actor.server.sequence.SeqUpdatesManager._
import im.actor.server.sequence.{ SeqState, SeqStateDate }
import im.actor.server.user.UserOffice
import ACLUtils._
import im.actor.util.misc.IdUtils._
import ImageUtils._
import org.joda.time.DateTime
import slick.driver.PostgresDriver.api._
import scala.concurrent.Future
import scala.concurrent.forkjoin.ThreadLocalRandom
private[group] trait GroupCommandHandlers extends GroupsImplicits with GroupCommandHelpers {
this: GroupProcessor ⇒
import GroupCommands._
import GroupEvents._
protected def createInternal(typ: GroupType, creatorUserId: Int, title: String, userIds: Seq[Int], isHidden: Option[Boolean], isHistoryShared: Option[Boolean], extensions: Seq[ApiExtension] = Seq.empty): Unit = {
val accessHash = genAccessHash()
val date = now()
val created = GroupEvents.Created(groupId, Some(typ), creatorUserId, accessHash, title, (userIds.toSet + creatorUserId).toSeq, isHidden, isHistoryShared, extensions)
val state = initState(date, created)
persist(TSEvent(date, created)) { _ ⇒
context become working(state)
val rng = ThreadLocalRandom.current()
// FIXME: invite other members
val update = UpdateGroupInvite(groupId, creatorUserId, date.getMillis, rng.nextLong())
db.run(for {
_ ← createInDb(state, rng.nextLong())
_ ← p.GroupUser.create(groupId, creatorUserId, creatorUserId, date, None, isAdmin = true)
_ ← DBIO.from(UserOffice.broadcastUserUpdate(creatorUserId, update, pushText = None, isFat = true, deliveryId = Some(s"creategroup_${groupId}_${update.randomId}")))
} yield CreateInternalAck(accessHash)) pipeTo sender() onFailure {
case e ⇒
log.error(e, "Failed to create group internally")
}
}
}
protected def create(groupId: Int, typ: GroupType, creatorUserId: Int, creatorAuthId: Long, title: String, randomId: Long, userIds: Set[Int]): Unit = {
val accessHash = genAccessHash()
val rng = ThreadLocalRandom.current()
userIds.filterNot(_ == creatorUserId) foreach { userId ⇒
val randomId = rng.nextLong()
context.parent ! Invite(groupId, userId, creatorUserId, creatorAuthId, randomId)
}
val date = now()
val created = GroupEvents.Created(groupId, Some(typ), creatorUserId, accessHash, title, Seq(creatorUserId), isHidden = Some(false), isHistoryShared = Some(false))
val state = initState(date, created)
persist(TSEvent(date, created)) { _ ⇒
context become working(state)
val serviceMessage = GroupServiceMessages.groupCreated
val update = UpdateGroupInvite(groupId = groupId, inviteUserId = creatorUserId, date = date.getMillis, randomId = randomId)
db.run(
for {
_ ← p.Group.create(
models.Group(
id = groupId,
creatorUserId = state.creatorUserId,
accessHash = state.accessHash,
title = state.title,
isPublic = (state.typ == GroupType.Public),
createdAt = state.createdAt,
about = None,
topic = None
),
randomId
)
_ ← p.GroupUser.create(groupId, creatorUserId, creatorUserId, date, None, isAdmin = true)
_ ← HistoryUtils.writeHistoryMessage(
models.Peer.privat(creatorUserId),
models.Peer.group(state.id),
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
)
seqstate ← if (isBot(state, creatorUserId)) DBIO.successful(SeqState(0, ByteString.EMPTY))
else DBIO.from(UserOffice.broadcastClientUpdate(creatorUserId, creatorAuthId, update, pushText = None, isFat = true, deliveryId = Some(s"creategroup_${groupId}_${randomId}")))
} yield CreateAck(state.accessHash, seqstate, date.getMillis)
) pipeTo sender() onFailure {
case e ⇒
log.error(e, "Failed to create a group")
}
}
val botUserId = nextIntId(rng)
val botToken = accessToken(rng)
val botAdded = GroupEvents.BotAdded(botUserId, botToken)
persist(TSEvent(now(), botAdded)) { tsEvt ⇒
context become working(updatedState(tsEvt, state))
(for {
_ ← UserOffice.create(botUserId, nextAccessSalt(ThreadLocalRandom.current()), "Bot", "US", ApiSex.Unknown, isBot = true)
_ ← db.run(p.GroupBot.create(groupId, botUserId, botToken))
_ ← integrationTokensKv.upsert(botToken, groupId)
} yield ()) onFailure {
case e ⇒
log.error(e, "Failed to create group bot")
}
}
}
protected def invite(group: Group, userId: Int, inviterUserId: Int, inviterAuthId: Long, randomId: Long, date: DateTime): Future[SeqStateDate] = {
val dateMillis = date.getMillis
val memberIds = group.members.keySet
val inviteeUpdate = UpdateGroupInvite(groupId = groupId, randomId = randomId, inviteUserId = inviterUserId, date = dateMillis)
val userAddedUpdate = UpdateGroupUserInvited(groupId = groupId, userId = userId, inviterUserId = inviterUserId, date = dateMillis, randomId = randomId)
val serviceMessage = GroupServiceMessages.userInvited(userId)
for {
_ ← db.run(p.GroupUser.create(groupId, userId, inviterUserId, date, None, isAdmin = false))
_ ← UserOffice.broadcastUserUpdate(userId, inviteeUpdate, pushText = Some(PushTexts.Invited), isFat = true, deliveryId = Some(s"invite_${groupId}_${randomId}"))
      // TODO: #perf the following broadcasts serialize the update separately for each user
_ ← Future.sequence(memberIds.toSeq.filterNot(_ == inviterUserId).map(UserOffice.broadcastUserUpdate(_, userAddedUpdate, Some(PushTexts.Added), isFat = true, deliveryId = Some(s"useradded_${groupId}_${randomId}")))) // use broadcastUsersUpdate maybe?
seqstate ← UserOffice.broadcastClientUpdate(inviterUserId, inviterAuthId, userAddedUpdate, pushText = None, isFat = true, deliveryId = Some(s"useradded_${groupId}_${randomId}"))
// TODO: Move to a History Writing subsystem
_ ← db.run(HistoryUtils.writeHistoryMessage(
models.Peer.privat(inviterUserId),
models.Peer.group(groupId),
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
))
} yield {
SeqStateDate(seqstate.seq, seqstate.state, dateMillis)
}
}
protected def setJoined(group: Group, joiningUserId: Int, joiningUserAuthId: Long, invitingUserId: Int): Unit = {
if (!hasMember(group, joiningUserId) || isInvited(group, joiningUserId)) {
val replyTo = sender()
persist(TSEvent(now(), GroupEvents.UserJoined(joiningUserId, invitingUserId))) { evt ⇒
val newState = workWith(evt, group)
val memberIds = group.members.keySet
val action: DBIO[(SeqStateDate, Vector[Int], Long)] = {
for {
updates ← {
val date = new DateTime
val randomId = ThreadLocalRandom.current().nextLong()
for {
exists ← p.GroupUser.exists(groupId, joiningUserId)
_ ← if (exists) DBIO.successful(()) else p.GroupUser.create(groupId, joiningUserId, invitingUserId, date, Some(LocalDateTime.now(ZoneOffset.UTC)), isAdmin = false)
seqstatedate ← DBIO.from(GroupDialogOperations.sendMessage(groupId, joiningUserId, joiningUserAuthId, randomId, GroupServiceMessages.userJoined, isFat = true))
} yield (seqstatedate, memberIds.toVector :+ invitingUserId, randomId)
}
} yield updates
}
db.run(action) pipeTo replyTo onFailure {
case e ⇒
replyTo ! Status.Failure(e)
}
}
} else {
sender() ! Status.Failure(GroupErrors.UserAlreadyInvited)
}
}
protected def kick(group: Group, kickedUserId: Int, kickerUserId: Int, kickerAuthId: Long, randomId: Long): Unit = {
val replyTo = sender()
val date = new DateTime
persist(TSEvent(now(), GroupEvents.UserKicked(kickedUserId, kickerUserId, date.getMillis))) { evt ⇒
workWith(evt, group)
val update = UpdateGroupUserKick(groupId, kickedUserId, kickerUserId, date.getMillis, randomId)
val serviceMessage = GroupServiceMessages.userKicked(kickedUserId)
db.run(removeUser(kickedUserId, group.members.keySet, kickerAuthId, serviceMessage, update, date, randomId)) pipeTo replyTo onFailure {
case e ⇒ replyTo ! Status.Failure(e)
}
}
}
protected def leave(group: Group, userId: Int, authId: Long, randomId: Long): Unit = {
val replyTo = sender()
val date = new DateTime
persist(TSEvent(now(), GroupEvents.UserLeft(userId, date.getMillis))) { evt ⇒
workWith(evt, group)
val update = UpdateGroupUserLeave(groupId, userId, date.getMillis, randomId)
val serviceMessage = GroupServiceMessages.userLeft(userId)
db.run(removeUser(userId, group.members.keySet, authId, serviceMessage, update, date, randomId)) pipeTo replyTo onFailure {
case e ⇒ replyTo ! Status.Failure(e)
}
}
}
protected def updateAvatar(group: Group, clientUserId: Int, clientAuthId: Long, avatarOpt: Option[Avatar], randomId: Long): Unit = {
persistStashingReply(TSEvent(now(), AvatarUpdated(avatarOpt)), group) { evt ⇒
val date = new DateTime
val avatarData = avatarOpt map (getAvatarData(models.AvatarData.OfGroup, groupId, _)) getOrElse models.AvatarData.empty(models.AvatarData.OfGroup, groupId.toLong)
val update = UpdateGroupAvatarChanged(groupId, clientUserId, avatarOpt, date.getMillis, randomId)
val serviceMessage = GroupServiceMessages.changedAvatar(avatarOpt)
val memberIds = group.members.keySet
db.run(for {
_ ← p.AvatarData.createOrUpdate(avatarData)
(seqstate, _) ← broadcastClientAndUsersUpdate(clientUserId, clientAuthId, memberIds, update, None, isFat = false)
} yield {
db.run(HistoryUtils.writeHistoryMessage(
models.Peer.privat(clientUserId),
models.Peer.group(groupId),
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
))
UpdateAvatarAck(avatarOpt, SeqStateDate(seqstate.seq, seqstate.state, date.getMillis))
})
}
}
protected def makePublic(group: Group, description: String): Unit = {
persistStashingReply(Vector(TSEvent(now(), BecamePublic()), TSEvent(now(), AboutUpdated(Some(description)))), group) { _ ⇒
db.run(DBIO.sequence(Seq(
p.Group.makePublic(groupId),
p.Group.updateAbout(groupId, Some(description))
))) map (_ ⇒ MakePublicAck())
}
}
protected def updateTitle(group: Group, clientUserId: Int, clientAuthId: Long, title: String, randomId: Long): Unit = {
val memberIds = group.members.keySet
persistStashingReply(TSEvent(now(), TitleUpdated(title)), group) { _ ⇒
val date = new DateTime
val update = UpdateGroupTitleChanged(groupId = groupId, userId = clientUserId, title = title, date = date.getMillis, randomId = randomId)
val serviceMessage = GroupServiceMessages.changedTitle(title)
db.run(for {
_ ← p.Group.updateTitle(groupId, title, clientUserId, randomId, date)
_ ← HistoryUtils.writeHistoryMessage(
models.Peer.privat(clientUserId),
models.Peer.group(groupId),
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
)
(seqstate, _) ← broadcastClientAndUsersUpdate(clientUserId, clientAuthId, memberIds, update, Some(PushTexts.TitleChanged), isFat = false)
} yield SeqStateDate(seqstate.seq, seqstate.state, date.getMillis))
}
}
protected def updateTopic(group: Group, clientUserId: Int, clientAuthId: Long, topic: Option[String], randomId: Long): Unit = {
withGroupMember(group, clientUserId) { member ⇒
val trimmed = topic.map(_.trim)
      if (trimmed.map(s ⇒ s.nonEmpty && s.length < 255).getOrElse(true)) {
persistStashingReply(TSEvent(now(), TopicUpdated(trimmed)), group) { _ ⇒
val date = new DateTime
val dateMillis = date.getMillis
val serviceMessage = GroupServiceMessages.changedTopic(trimmed)
val update = UpdateGroupTopicChanged(groupId = groupId, randomId = randomId, userId = clientUserId, topic = trimmed, date = dateMillis)
db.run(for {
_ ← p.Group.updateTopic(groupId, trimmed)
_ ← HistoryUtils.writeHistoryMessage(
models.Peer.privat(clientUserId),
models.Peer.group(groupId),
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
)
(SeqState(seq, state), _) ← broadcastClientAndUsersUpdate(
clientUserId = clientUserId,
clientAuthId = clientAuthId,
userIds = group.members.keySet - clientUserId,
update = update,
pushText = Some(PushTexts.TopicChanged),
isFat = false
)
} yield SeqStateDate(seq, state, dateMillis))
}
} else {
sender() ! Status.Failure(TopicTooLong)
}
}
}
protected def updateAbout(group: Group, clientUserId: Int, clientAuthId: Long, about: Option[String], randomId: Long): Unit = {
withGroupAdmin(group, clientUserId) {
val trimmed = about.map(_.trim)
      if (trimmed.map(s ⇒ s.nonEmpty && s.length < 255).getOrElse(true)) {
persistStashingReply(TSEvent(now(), AboutUpdated(trimmed)), group) { _ ⇒
val date = new DateTime
val dateMillis = date.getMillis
val update = UpdateGroupAboutChanged(groupId, trimmed)
val serviceMessage = GroupServiceMessages.changedAbout(trimmed)
db.run(for {
_ ← p.Group.updateAbout(groupId, trimmed)
_ ← HistoryUtils.writeHistoryMessage(
models.Peer.privat(clientUserId),
models.Peer.group(groupId),
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
)
(SeqState(seq, state), _) ← broadcastClientAndUsersUpdate(
clientUserId = clientUserId,
clientAuthId = clientAuthId,
userIds = group.members.keySet - clientUserId,
update = update,
pushText = Some(PushTexts.AboutChanged),
isFat = false
)
} yield SeqStateDate(seq, state, dateMillis))
}
} else {
sender() ! Status.Failure(AboutTooLong)
}
}
}
protected def makeUserAdmin(group: Group, clientUserId: Int, clientAuthId: Long, candidateId: Int): Unit = {
withGroupAdmin(group, clientUserId) {
withGroupMember(group, candidateId) { member ⇒
persistStashingReply(TSEvent(now(), UserBecameAdmin(candidateId, clientUserId)), group) { e ⇒
val date = e.ts
if (!member.isAdmin) {
          // the current state is not yet updated by the UserBecameAdmin event, so we update it manually
val updated = group.members.updated(candidateId, group.members(candidateId).copy(isAdmin = true))
val members = updated.values.map(_.asStruct).toVector
db.run(for {
_ ← p.GroupUser.makeAdmin(groupId, candidateId)
(seqState, _) ← broadcastClientAndUsersUpdate(
clientUserId = clientUserId,
clientAuthId = clientAuthId,
userIds = group.members.keySet - clientUserId,
update = UpdateGroupMembersUpdate(groupId, members),
pushText = None,
isFat = false
)
} yield (members, seqState))
} else {
Future.failed(UserAlreadyAdmin)
}
}
}
}
}
protected def revokeIntegrationToken(group: Group, userId: Int): Unit = {
withGroupAdmin(group, userId) {
val oldToken = group.bot.map(_.token)
val newToken = accessToken(ThreadLocalRandom.current())
persistStashingReply(TSEvent(now(), IntegrationTokenRevoked(newToken)), group) { _ ⇒
for {
_ ← db.run(p.GroupBot.updateToken(groupId, newToken))
_ ← integrationTokensKv.delete(oldToken.getOrElse(""))
_ ← integrationTokensKv.upsert(newToken, groupId)
} yield RevokeIntegrationTokenAck(newToken)
}
}
}
private def removeUser(userId: Int, memberIds: Set[Int], clientAuthId: Long, serviceMessage: ApiServiceMessage, update: Update, date: DateTime, randomId: Long): DBIO[SeqStateDate] = {
val groupPeer = models.Peer.group(groupId)
for {
_ ← p.GroupUser.delete(groupId, userId)
_ ← p.GroupInviteToken.revoke(groupId, userId)
(SeqState(seq, state), _) ← broadcastClientAndUsersUpdate(userId, clientAuthId, memberIds - userId, update, Some(PushTexts.Left), isFat = false)
// TODO: Move to a History Writing subsystem
_ ← p.Dialog.updateLastReadAt(userId, groupPeer, date)
_ ← p.Dialog.updateOwnerLastReadAt(userId, groupPeer, date)
_ ← HistoryUtils.writeHistoryMessage(
models.Peer.privat(userId),
groupPeer,
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
)
} yield SeqStateDate(seq, state, date.getMillis)
}
private def genAccessHash(): Long =
ThreadLocalRandom.current().nextLong()
private def createInDb(state: Group, randomId: Long) =
p.Group.create(
models.Group(
id = groupId,
creatorUserId = state.creatorUserId,
accessHash = state.accessHash,
title = state.title,
isPublic = (state.typ == GroupType.Public),
createdAt = state.createdAt,
about = None,
topic = None
),
randomId
)
}
|
liruqi/actor-platform
|
actor-server/actor-core/src/main/scala/im/actor/server/group/GroupCommandHandlers.scala
|
Scala
|
mit
| 18,821 |
package org.danielnixon.progressive.play
import java.net.URI
import org.danielnixon.progressive.play.extensions.RequestHeaderWrapper
import play.api.mvc.{ Call, RequestHeader, Result }
import play.api.mvc.Results.Redirect
import scala.util.Try
import scalaz.Scalaz._
object Results {
def redirectToRefererOrElse(call: Call)(implicit request: RequestHeader): Result = {
request.referer.
flatMap(r => Try(new URI(r)).toOption).
filter(r => !r.isAbsolute || request.domain === r.getHost).
map(r => Redirect(r.toString)).
getOrElse(Redirect(call))
}
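  // Illustrative only (hypothetical requests): for a request to example.com, a Referer of
  // "https://example.com/items" or a relative "/items" redirects back to that URI, while
  // "https://other.example.org/" fails the same-domain check and falls back to
  // Redirect(call).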
}
|
danielnixon/progressive
|
server-play/src/main/scala/org/danielnixon/progressive/play/Results.scala
|
Scala
|
gpl-3.0
| 584 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.legacy
import wvlet.airspec.AirSpec
import wvlet.airframe._
/**
*/
class DesignBuildTest extends AirSpec {
test("visible outer variables in code block") {
val helloDesign = "hello"
val d = newSilentDesign
.bind[String].toInstance(helloDesign)
d.build[String] { x => helloDesign }
}
}
|
wvlet/airframe
|
airframe-di/src/test/scala-2/wvlet/airframe/legacy/DesignBuildTest.scala
|
Scala
|
apache-2.0
| 896 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher
import java.util.concurrent.TimeUnit
import scala.concurrent.duration._
import org.scalatest.concurrent.Eventually._
import org.scalatest.matchers.must.Matchers
import org.scalatest.matchers.should.Matchers._
import org.apache.spark._
import org.apache.spark.internal.config.UI.UI_ENABLED
import org.apache.spark.util.Utils
class LauncherBackendSuite extends SparkFunSuite with Matchers {
private val tests = Seq(
"local" -> "local",
"standalone/client" -> "local-cluster[1,1,1024]")
tests.foreach { case (name, master) =>
test(s"$name: launcher handle") {
      // The tests here fail on Windows due to the command length limitation (up to 8K).
assume(!Utils.isWindows)
testWithMaster(master)
}
}
private def testWithMaster(master: String): Unit = {
val env = new java.util.HashMap[String, String]()
env.put("SPARK_PRINT_LAUNCH_COMMAND", "1")
val handle = new SparkLauncher(env)
.setSparkHome(sys.props("spark.test.home"))
.setConf(SparkLauncher.DRIVER_EXTRA_CLASSPATH, System.getProperty("java.class.path"))
.setConf(UI_ENABLED.key, "false")
.setConf(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS, s"-Dtest.appender=console")
.setMaster(master)
.setAppResource(SparkLauncher.NO_RESOURCE)
.setMainClass(TestApp.getClass.getName().stripSuffix("$"))
.startApplication()
try {
eventually(timeout(30.seconds), interval(100.milliseconds)) {
handle.getAppId() should not be (null)
}
handle.stop()
eventually(timeout(30.seconds), interval(100.milliseconds)) {
handle.getState() should be (SparkAppHandle.State.KILLED)
}
} finally {
handle.kill()
}
}
}
object TestApp {
def main(args: Array[String]): Unit = {
new SparkContext(new SparkConf()).parallelize(Seq(1)).foreach { i =>
Thread.sleep(TimeUnit.SECONDS.toMillis(20))
}
}
}
|
maropu/spark
|
core/src/test/scala/org/apache/spark/launcher/LauncherBackendSuite.scala
|
Scala
|
apache-2.0
| 2,747 |
import annotation.alpha
class Gamma {
def foo: Int = 1
}
class Delta extends Gamma { // error: name clash
@alpha("foo") def bar: Int = 1
}
|
som-snytt/dotty
|
tests/neg/alpha-late.scala
|
Scala
|
apache-2.0
| 152 |
package scutil.log
import scutil.lang._
import scutil.lang.tc.Show
object LogValue {
implicit def StringAsLogValue(it:String):LogValue = LogValue string it
implicit def ThrowableAsLogValue(it:Throwable):LogValue = LogValue throwable it
implicit def MultipleAsLogValue(it:Seq[LogValue]):LogValue = LogValue multiple it
implicit def ShowAsLogValue[T:Show](it:T):LogValue = LogValue string (Show doit it)
implicit def SeqShowAsLogValue[T](it:Seq[T])(implicit S:Show[T]):LogValue = LogValue multiple (it map S.show map LogValue.string)
implicit def SetShowAsLogValue[T](it:Set[T])(implicit S:Show[T]):LogValue = SeqShowAsLogValue(it.toVector)
implicit def NesShowAsLogValue[T](it:Nes[T])(implicit S:Show[T]):LogValue = SeqShowAsLogValue(it.toSeq)
//------------------------------------------------------------------------------
def string(it:String):LogValue = LogString(it)
def throwable(it:Throwable):LogValue = LogThrowable(it)
def multiple(it:Seq[LogValue]):LogValue = LogMultiple(it)
def variable(it:LogValue*):LogValue = LogMultiple(it)
//------------------------------------------------------------------------------
final case class LogString(value:String) extends LogValue
final case class LogThrowable(value:Throwable) extends LogValue
final case class LogMultiple(values:Seq[LogValue]) extends LogValue
}
sealed trait LogValue {
def atoms:Seq[LogAtom] =
this match {
case LogValue.LogString(x) => Vector(LogAtom.LogString(x))
case LogValue.LogThrowable(x) => Vector(LogAtom.LogThrowable(x))
case LogValue.LogMultiple(x) => x flatMap (_.atoms)
}
}
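// Illustrative only: LogValue.multiple(Seq(LogValue.string("a"),
// LogValue.multiple(Seq(LogValue.string("b"), LogValue.throwable(new Exception("c"))))))
// flattens via .atoms into a flat sequence of LogString("a"), LogString("b") and a
// LogThrowable atom, regardless of how deeply the values were nested.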
|
ritschwumm/scutil
|
modules/jdk/src/main/scala/scutil/log/LogValue.scala
|
Scala
|
bsd-2-clause
| 1,611 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.hibench.sparkbench.ml
import org.apache.hadoop.io.LongWritable
import org.apache.log4j.{Level, Logger}
import org.apache.mahout.math.VectorWritable
import org.apache.spark.mllib.clustering.KMeans
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.{SparkConf, SparkContext}
import scopt.OptionParser
/**
*
* An example k-means app. Run with
* {{{
* ./bin/run-example org.apache.spark.examples.mllib.DenseKMeans [options] <input>
* }}}
* If you use it as a template to create your own app, please use `spark-submit` to submit your app.
*/
object DenseKMeans {
object InitializationMode extends Enumeration {
type InitializationMode = Value
val Random, Parallel = Value
}
import com.intel.hibench.sparkbench.ml.DenseKMeans.InitializationMode._
case class Params(
input: String = null,
k: Int = -1,
numIterations: Int = 10,
initializationMode: InitializationMode = Parallel)
def main(args: Array[String]) {
val defaultParams = Params()
val parser = new OptionParser[Params]("DenseKMeans") {
head("DenseKMeans: an example k-means app for dense data.")
opt[Int]('k', "k")
.required()
.text(s"number of clusters, required")
.action((x, c) => c.copy(k = x))
opt[Int]("numIterations")
.text(s"number of iterations, default; ${defaultParams.numIterations}")
.action((x, c) => c.copy(numIterations = x))
opt[String]("initMode")
.text(s"initialization mode (${InitializationMode.values.mkString(",")}), " +
s"default: ${defaultParams.initializationMode}")
.action((x, c) => c.copy(initializationMode = InitializationMode.withName(x)))
arg[String]("<input>")
.text("input paths to examples")
.required()
.action((x, c) => c.copy(input = x))
}
parser.parse(args, defaultParams).map { params =>
run(params)
}.getOrElse {
sys.exit(1)
}
}
def run(params: Params) {
val conf = new SparkConf().setAppName(s"DenseKMeans with $params")
val sc = new SparkContext(conf)
// Logger.getRootLogger.setLevel(Level.WARN)
val data = sc.sequenceFile[LongWritable, VectorWritable](params.input)
val examples = data.map { case (k, v) =>
var vector: Array[Double] = new Array[Double](v.get().size)
for (i <- 0 until v.get().size) vector(i) = v.get().get(i)
Vectors.dense(vector)
}.cache()
// val examples = sc.textFile(params.input).map { line =>
// Vectors.dense(line.split(' ').map(_.toDouble))
// }.cache()
val numExamples = examples.count()
println(s"numExamples = $numExamples.")
val initMode = params.initializationMode match {
case Random => KMeans.RANDOM
case Parallel => KMeans.K_MEANS_PARALLEL
}
val model = new KMeans()
.setInitializationMode(initMode)
.setK(params.k)
.setMaxIterations(params.numIterations)
.run(examples)
val cost = model.computeCost(examples)
println(s"Total cost = $cost.")
sc.stop()
}
}
|
nareshgundla/HiBench
|
sparkbench/ml/src/main/scala/com/intel/sparkbench/ml/DenseKMeans.scala
|
Scala
|
apache-2.0
| 3,874 |
package models
import java.util.UUID
import play.api.libs.functional.syntax._
import play.api.libs.json._
sealed trait LabworkLike extends UniqueEntity {
def label: String
}
case class Labwork(label: String, description: String, semester: UUID, course: UUID, degree: UUID, subscribable: Boolean = false, published: Boolean = false, id: UUID = UUID.randomUUID) extends LabworkLike
case class LabworkAtom(label: String, description: String, semester: Semester, course: CourseAtom, degree: Degree, subscribable: Boolean, published: Boolean, id: UUID) extends LabworkLike
case class LabworkProtocol(label: String, description: String, semester: UUID, course: UUID, degree: UUID, subscribable: Boolean, published: Boolean)
object Labwork {
implicit val writes: Writes[Labwork] = Json.writes[Labwork]
}
object LabworkProtocol {
implicit val reads: Reads[LabworkProtocol] = Json.reads[LabworkProtocol]
}
object LabworkLike {
implicit val writes: Writes[LabworkLike] = {
case normal: Labwork => Json.toJson(normal)(Labwork.writes)
case atom: LabworkAtom => Json.toJson(atom)(LabworkAtom.writes)
}
}
object LabworkAtom {
implicit val writes: Writes[LabworkAtom] = (
(JsPath \\ "label").write[String] and
(JsPath \\ "description").write[String] and
(JsPath \\ "semester").write[Semester](Semester.writes) and
(JsPath \\ "course").write[CourseAtom](CourseAtom.writes) and
(JsPath \\ "degree").write[Degree](Degree.writes) and
(JsPath \\ "subscribable").write[Boolean] and
(JsPath \\ "published").write[Boolean] and
(JsPath \\ "id").write[UUID]
) (unlift(LabworkAtom.unapply))
}
|
THK-ADV/lwm-reloaded
|
app/models/Labwork.scala
|
Scala
|
mit
| 1,642 |
object Test {
def f() = {
val ar = Array.ofDim[Int](5)
var x = 0
while (x<=5) {
println(x)
@annotation.unused val a = ar(x)
x+=1
}
}
def main(args: Array[String]): Unit = {
try { f() ; assert(false, "should have thrown exception") }
catch { case _: ArrayIndexOutOfBoundsException => () }
}
}
|
scala/scala
|
test/files/run/optimizer-array-load.scala
|
Scala
|
apache-2.0
| 345 |
/**
* Copyright (C) 2014 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.processor
import org.orbeon.oxf.xforms.state.XFormsStaticStateCache.CacheTracer
import org.orbeon.oxf.util.IndentedLogger
class LoggingCacheTracer(logger: IndentedLogger) extends CacheTracer {
def digestAndTemplateStatus(digestIfFound: Option[String]): Unit =
digestIfFound match {
case Some(digest) => logger.logDebug("", "template and static state digest obtained from cache", "digest", digest)
case None => logger.logDebug("", "template and static state digest not obtained from cache.")
}
def staticStateStatus(found: Boolean, digest: String): Unit =
if (found)
logger.logDebug("", "found up-to-date static state by digest in cache", "digest", digest)
else
logger.logDebug("", "did not find static state by digest in cache", "digest", digest)
}
|
orbeon/orbeon-forms
|
xforms/jvm/src/main/scala/org/orbeon/oxf/xforms/processor/LoggingCacheTracer.scala
|
Scala
|
lgpl-2.1
| 1,482 |
package org.jetbrains.plugins.scala
package editor
package enterHandler
import com.intellij.codeInsight.CodeInsightSettings
import com.intellij.codeInsight.editorActions.enter.EnterHandlerDelegate.Result
import com.intellij.codeInsight.editorActions.enter.EnterHandlerDelegateAdapter
import com.intellij.openapi.actionSystem.DataContext
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.editor.actionSystem.EditorActionHandler
import com.intellij.openapi.util.Ref
import com.intellij.psi.PsiFile
import com.intellij.psi.codeStyle.CodeStyleManager
import org.apache.commons.lang3.StringUtils
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.ScLiteral
import org.jetbrains.plugins.scala.util.MultilineStringUtil.MultilineQuotes
import org.jetbrains.plugins.scala.util.{MultilineStringSettings, MultilineStringUtil}
// TODO: add a Scala prefix to all handler class names to make debugging easier
class MultilineStringEnterHandler extends EnterHandlerDelegateAdapter {
private var wasInMultilineString: Boolean = false
private var whiteSpaceAfterCaret: String = ""
override def preprocessEnter(file: PsiFile, editor: Editor, caretOffsetRef: Ref[Integer], caretAdvance: Ref[Integer],
dataContext: DataContext, originalHandler: EditorActionHandler): Result = {
val caretOffset = caretOffsetRef.get.intValue
if (!file.is[ScalaFile] || !editor.inScalaString(caretOffset)) return Result.Continue
val document = editor.getDocument
val text = document.getImmutableCharSequence
if (caretOffset == 0 || caretOffset >= text.length()) return Result.Continue
val element = file.findElementAt(caretOffset)
if (element == null || !MultilineStringUtil.inMultilineString(element)) return Result.Continue
wasInMultilineString = true
whiteSpaceAfterCaret = whitespaceAfter(text, caretOffset)
document.deleteString(caretOffset, caretOffset + whiteSpaceAfterCaret.length)
val caretBetweenBrackets = {
val ch1 = text.charAt(caretOffset - 1)
val ch2 = text.charAt(caretOffset)
val caretBetweenParens = ch1 == '(' && ch2 == ')'
val caretBetweenBraces = ch1 == '{' && ch2 == '}'
caretBetweenParens || caretBetweenBraces
}
if (caretBetweenBrackets && CodeInsightSettings.getInstance.SMART_INDENT_ON_ENTER) {
originalHandler.execute(editor, editor.getCaretModel.getCurrentCaret, dataContext)
Result.DefaultForceIndent
} else {
Result.Continue
}
}
override def postProcessEnter(file: PsiFile, editor: Editor, dataContext: DataContext): Result = {
if (!file.is[ScalaFile]) return Result.Continue
if (!wasInMultilineString) return Result.Continue
wasInMultilineString = false
val project = file.getProject
val document = editor.getDocument
document.commit(project) // TODO: AVOID COMMITTING DOCUMENTS ON TYPING!
val caretModel = editor.getCaretModel
val offset = caretModel.getOffset
val caretMarker = document.createRangeMarker(offset, offset)
caretMarker.setGreedyToRight(true)
def caretOffset = caretMarker.getEndOffset
val element = file.findElementAt(offset)
if (element == null) return Result.Continue
val literal: ScLiteral = MultilineStringUtil.findParentMLString(element) match {
case Some(v) => v
case _ => return Result.Continue
}
val literalOffset: Int = literal.getTextRange.getStartOffset
val interpolRef: String = MultilineStringUtil.interpolatorPrefix(literal)
val firstMLQuote: String = interpolRef + MultilineQuotes
val firstMLQuoteLength: Int = firstMLQuote.length
val settings = new MultilineStringSettings(project)
import settings._
if (!settings.supportMultilineString || offset - literalOffset < firstMLQuoteLength)
return Result.Continue
def getLineByNumber(number: Int): String = {
val sequence = document.getImmutableCharSequence
val start = document.getLineStartOffset(number)
val end = document.getLineEndOffset(number)
sequence.substring(start, end)
}
def insertNewLine(nlOffset: Int, indent: Int, trimPreviousLine: Boolean,
marginChar: Option[Char] = None): Unit = {
      document.insertString(nlOffset, "\n")
forceIndent(nlOffset + 1, indent, marginChar)
if (trimPreviousLine) {
val line = getLineByNumber(document.getLineNumber(nlOffset))
var i = 0
def charToCheck = line.charAt(line.length - 1 - i)
        while (i <= line.length - 1 && (charToCheck == ' ' || charToCheck == '\t')) {
i += 1
}
document.deleteString(nlOffset - i, nlOffset)
}
}
def forceIndent(offset: Int, indent: Int, marginChar: Option[Char]): Unit = {
val lineNumber = document.getLineNumber(offset)
val lineStart = document.getLineStartOffset(lineNumber)
val line = getLineByNumber(lineNumber)
      val wsPrefix = line.takeWhile(c => c == ' ' || c == '\t')
document.replaceString(lineStart, lineStart + wsPrefix.length, getSmartSpaces(indent) + marginChar.getOrElse(""))
}
inWriteAction {
val currentLineNumber = document.getLineNumber(offset)
val prevLineNumber = currentLineNumber - 1
val nextLineNumber = currentLineNumber + 1
assert(prevLineNumber >= 0)
val prevLine = getLineByNumber(prevLineNumber)
val currentLine = getLineByNumber(prevLineNumber + 1)
val nextLine = if (document.getLineCount > nextLineNumber) getLineByNumber(prevLineNumber + 2) else ""
def prevLinePrefixAfterDelimiter(offsetInLine: Int): Int =
        StringUtils.substring(prevLine, offsetInLine).segmentLength(c => c == ' ' || c == '\t')
val literalText = literal.getText
      val lines = literalText.split("\n")
val marginChar: Char = MultilineStringUtil.getMarginChar(element)
val marginCharOpt: Option[Char] = {
if (settings.insertMargin && (
lines.length > 3 ||
MultilineStringUtil.hasMarginChars(element, marginChar.toString) ||
MultilineStringUtil.needAddByType(literal))) {
Some(marginChar)
} else {
None
}
}
lazy val insertedBracketsOnSingleLine = lines.length == 3 && {
def betweenBrackets = lines(0).endsWith('(') && lines(2).trim.startsWith(')')
def betweenBraces = lines(0).endsWith('{') && lines(2).trim.startsWith('}')
def caretIsBetweenBrackets = currentLineNumber == document.getLineNumber(literal.getTextRange.getStartOffset) + 1
(betweenBrackets || betweenBraces) && caretIsBetweenBrackets
}
def handleEnterInsideMultilineExpanded(): Unit = {
if (settings.insertMargin && MultilineStringUtil.needAddByType(literal)) {
MultilineStringUtil.insertStripMargin(document, literal, marginChar)
}
val needNewLineBeforeLiteral = quotesOnNewLine && !literal.startsFromNewLine(false)
if (needNewLineBeforeLiteral) {
insertNewLine(literalOffset, 0, trimPreviousLine = true)
}
val manager = CodeStyleManager.getInstance(project)
val newLinesAdded = insertedBracketsOnSingleLine.toInt + needNewLineBeforeLiteral.toInt
manager.adjustLineIndent(document, document.getLineStartOffset(currentLineNumber))
val firstLineIndent: Int = {
val lineIdx = prevLineNumber + needNewLineBeforeLiteral.toInt
val lineOffset = document.getLineStartOffset(lineIdx)
val indentStr = manager.getLineIndent(document, lineOffset)
calcIndentSize(indentStr)
}
val quotesIndent = firstLineIndent + interpolRef.length
forceIndent(caretOffset, quotesIndent + marginIndent, marginCharOpt)
if (insertedBracketsOnSingleLine) {
forceIndent(caretOffset + 1, quotesIndent, marginCharOpt)
}
document.commit(project) // TODO: AVOID COMMITTING DOCUMENTS ON TYPING!
if (settings.insertMargin) {
for {
lineIdx <- nextLineNumber to currentLineNumber + newLinesAdded
if lineIdx < document.getLineCount
} manager.adjustLineIndent(document, document.getLineStartOffset(lineIdx))
}
val closingQuotesOnNewLine =
settings.closingQuotesOnNewLine && literalText.substring(offset - literalOffset) == MultilineQuotes
if (closingQuotesOnNewLine) {
caretMarker.setGreedyToRight(false)
insertNewLine(caretOffset, quotesIndent, trimPreviousLine = false, marginCharOpt)
caretMarker.setGreedyToRight(true)
if (marginCharOpt.isDefined) {
manager.adjustLineIndent(document, document.getLineStartOffset(currentLineNumber + newLinesAdded + 1))
}
}
}
def handleEnterInsideMultiline(): Unit = {
val prevLineOffset = document.getLineStartOffset(prevLineNumber)
val currentLineOffset = document.getLineStartOffset(currentLineNumber)
val prevLineTrimmed = prevLine.trim
val isPrevLineFirst = prevLineTrimmed.startsWith(firstMLQuote)
        val wsPrefixLength: Int = prevLine.segmentLength(c => c == ' ' || c == '\t')
val quotesOptLength = if (isPrevLineFirst) firstMLQuoteLength else 0
val prevLineStriped: String = {
val idx = wsPrefixLength + quotesOptLength
prevLine.substring(idx)
}
def handleEnterWithMargin(): Unit = {
val currentLineHasMarginChar = currentLine.trim.startsWith(marginChar)
if (currentLineHasMarginChar) return
val inBraces = prevLine.endsWith('{') && nextLine.trim.startsWith('}') || prevLine.endsWith('(') && nextLine.trim.startsWith(')')
val prefix: String = {
if (inBraces)
getPrefix(prevLine) + getSmartSpaces(quotesOptLength)
else if (prevLineStriped.trim.startsWith(marginChar))
getPrefix(prevLine) + getSmartSpaces(quotesOptLength)
else if (nextLine.trim.startsWith(marginChar))
getPrefix(nextLine)
else
getPrefix(currentLine)
}
val indentSizeAfterMargin: Int = {
val offsetToContent =
if (isPrevLineFirst) firstMLQuoteLength + prevLineStriped.startsWith(marginChar).toInt
else 1
prevLinePrefixAfterDelimiter(wsPrefixLength + offsetToContent)
}
forceIndent(caretOffset, getSmartLength(prefix), marginCharOpt)
document.insertString(caretOffset, getSpaces(indentSizeAfterMargin))
if (inBraces) {
val nextLineOffset = document.getLineStartOffset(prevLineNumber + 2)
forceIndent(nextLineOffset, 0, None)
document.insertString(nextLineOffset, marginChar.toString + getSpaces(indentSizeAfterMargin))
forceIndent(nextLineOffset, getSmartLength(prefix), None)
}
}
def handleEnterWithoutMargin(): Unit = {
val isCurrentLineEmpty = StringUtils.isBlank(currentLine)
val isPrevLineEmpty = prevLine.trim.isEmpty
if (prevLineOffset < literalOffset) {
val beforeQuotes = prevLinePrefixAfterDelimiter(0)
val elementStart = prevLine.indexOf(firstMLQuote) + firstMLQuoteLength
val prevLineWsPrefixAfterQuotes = prevLinePrefixAfterDelimiter(elementStart)
val spacesToInsert =
if (isPrevLineFirst) {
beforeQuotes + firstMLQuoteLength + prevLineWsPrefixAfterQuotes
} else {
val shiftLeft = if (isCurrentLineEmpty) 0 else wsPrefixLength
elementStart - shiftLeft + prevLineWsPrefixAfterQuotes
}
forceIndent(currentLineOffset, getSmartLength(getSmartSpaces(spacesToInsert)), None)
}
else if (isCurrentLineEmpty && !isPrevLineEmpty) {
forceIndent(caretOffset, wsPrefixLength, None)
}
else if (isPrevLineEmpty) {
forceIndent(caretOffset, prevLine.length, None)
}
else if (isPrevLineFirst) {
val wsAfterQuotes = prevLinePrefixAfterDelimiter(wsPrefixLength + firstMLQuoteLength) + firstMLQuoteLength
forceIndent(caretOffset, wsAfterQuotes, None)
}
}
val literalAlreadyHasLineMargin: Boolean = {
// first line can contain quotes, so check stripped content
def prevLineHasMargin = prevLineStriped.startsWith(marginChar)
def otherLinesHaveMargin = lines.exists(_.trim.startsWith(marginChar))
prevLineHasMargin || otherLinesHaveMargin
}
if (literalAlreadyHasLineMargin && settings.insertMargin) {
handleEnterWithMargin()
} else {
handleEnterWithoutMargin()
}
}
val wasSingleLine = lines.length <= 2 || insertedBracketsOnSingleLine
if (wasSingleLine) {
handleEnterInsideMultilineExpanded()
} else {
handleEnterInsideMultiline()
}
document.insertString(caretOffset, whiteSpaceAfterCaret)
caretModel.moveToOffset(caretOffset)
caretMarker.dispose()
}
Result.Stop
}
private def whitespaceAfter(chars: CharSequence, offset: Int): String = {
val iterator = Iterator.range(offset, chars.length() - 1).map(chars.charAt)
    iterator.takeWhile(c => c == ' ' || c == '\t').mkString
}
}
|
JetBrains/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/editor/enterHandler/MultilineStringEnterHandler.scala
|
Scala
|
apache-2.0
| 13,509 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.enumeration
import enumeratum.{Enum, EnumEntry}
import io.truthencode.ddo.support.StringUtils.Extensions
import scala.collection.immutable
/**
* Created by adarr on 1/16/2017.
*/
object EnumExtensions {
// private def findEnum[E <: EnumEntry: Enum[E]](v: E) = implicitly[Enum[E]]
private def findEnum[E <: EnumEntry: Enum, A <: Enum[_ <: EnumEntry]](v: E) = implicitly[Enum[E]]
// final implicit class EnumCompanionOps[E <: EnumEntry:Enum[E],A <: Enum[E]](
implicit final class EnumCompanionOps[A <: Enum[_ <: EnumEntry]](
val comp: A
) {
def exists(id: String): Boolean = {
comp.namesToValuesMap.contains(id)
}
/**
     * Attempts to locate matching enumeration values based on a list of potential names.
*
* @param names
* List of string values to try. i.e. Red, blue, bLaCk for a color.
* @param ignoreCase
* toggles case sensitivity in search.
* @return
     *   The enumeration values whose names match any of the supplied names, or None if nothing matches.
*/
def withNames(
names: List[String],
ignoreCase: Boolean = false
): Option[immutable.IndexedSeq[_ <: EnumEntry]] = {
val sanitized: List[String] = names.map { x =>
x.filterAlphaNumeric
}
for {
sc <- Some(comp.values.filter { x =>
(ignoreCase && sanitized
.exists(n => n.equalsIgnoreCase(x.toString))) || sanitized.contains(
x.toString
)
}) if sc.nonEmpty
} yield sc
}
def withName(
name: String,
ignoreCase: Boolean = false
): Option[_ <: EnumEntry] = {
if (ignoreCase) {
comp.withNameInsensitiveOption(name)
} else {
comp.withNameOption(name)
}
}
/**
* A list of enum values matching the BitMask
*
* @param flag
* Bit value to compare
* @return
* All matching values
*/
def fromMask(flag: Int): Option[Seq[EnumEntry]] = {
// comp.bitValues.filter {x => (x._2 & flag != 0)}
// val zz :E = comp.values.head
// val it = comp.bitValues.
for {
sc <- Some(bitValues.filter { x =>
(x._2.toInt & flag) != 0
}.keys) if sc.nonEmpty
} yield sc.toSeq
}
def bitValues: Map[EnumEntry, Double] = comp.valuesToIndex.map { x =>
x._1 -> Math.pow(2.0, x._2)
}
def fromWords(words: String): Option[EnumEntry] = {
words.wordsToAcronym match {
case Some(x) => comp.withNameOption(x.toPascalCase)
case _ => None
}
}
}
implicit final class E2[E <: EnumEntry: Enum](val e: E) {
def foo = {
// e.bitValues
}
}
}
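// A self-contained usage sketch, not part of the original file: the `Color`
// enum below is hypothetical and exists only to illustrate the EnumCompanionOps
// extension methods defined above.
object EnumExtensionsUsageSketch {
  import EnumExtensions._
  sealed trait Color extends EnumEntry
  object Color extends Enum[Color] {
    val values: immutable.IndexedSeq[Color] = findValues
    case object Red extends Color
    case object Blue extends Color
    case object Black extends Color
  }
  val hasRed: Boolean = Color.exists("Red") // true
  val matched = Color.withNames(List("red", "BLUE"), ignoreCase = true) // Some(Vector(Red, Blue))
  val masked: Option[Seq[EnumEntry]] = Color.fromMask(0x3) // Red (bit 0) and Blue (bit 1)
}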
|
adarro/ddo-calc
|
subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/enumeration/EnumExtensions.scala
|
Scala
|
apache-2.0
| 3,457 |
/*
* Copyright 2012 The SIRIS Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* The SIRIS Project is a cooperation between Beuth University, Berlin and the
* HCI Group at the University of Würzburg. The project is funded by the German
* Federal Ministry of Education and Research (grant no. 17N4409).
*/
package simx.components.renderer.jvr
import de.bht.jvr.core.{Transform, VRCameraNode, SceneNode}
/**
* The abstract class CameraUpdater is the base class for every class which keeps two cameras in sync.
 * It is used internally for mirror effects.
*
* @author Stephan Rehfeld
*/
private[jvr] abstract class CameraUpdater {
/**
* The update method is called by the render connector to sync the cameras.
*/
def update()
}
/**
 * This CameraUpdater connects two cameras to render a mirror plane. The transformation
 * of the reference camera is mirrored at the mirror plane and written to the mirror camera.
 * In addition, the head position and eye separation are kept in sync.
*
* @author Stephan Rehfeld
*
* @param mirrorCamera The camera that is used to render the image of the surface. Values get written in this object.
 * @param referenceCamera The original camera that is used to render the scene. Values are read from this object.
* @param mirrorPlane The mirror plane. The transformation of the mirror camera is calculated in relation to the mirror plane.
*/
private[jvr] class MirrorCameraUpdater( mirrorCamera : VRCameraNode, referenceCamera : VRCameraNode, mirrorPlane : SceneNode ) extends CameraUpdater {
require( mirrorCamera != null, "The parameter 'mirrorCamera' must not be 'null'!" )
require( referenceCamera != null, "The parameter 'referenceCamera' must not be 'null'!" )
  require( mirrorPlane != null, "The parameter 'mirrorPlane' must not be 'null'!" )
override def update() {
mirrorCamera.setTransform( mirrorPlane.getTransform.mul( Transform.scale(1,1,-1).mul( mirrorPlane.getTransform.invert.mul( referenceCamera.getTransform ) ) ) )
mirrorCamera.setHeadTransform( referenceCamera.getHeadTransform )
mirrorCamera.setEyeSeparation( referenceCamera.getEyeSeparation )
}
  override def toString = "This MirrorCameraUpdater keeps the camera " + referenceCamera + " in sync with mirror camera " + mirrorCamera + " relative to the plane " + mirrorPlane + "."
}
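/**
 * A minimal usage sketch, not part of the original sources: a render connector
 * would typically keep a collection of CameraUpdater instances and call update()
 * on each of them once per frame, before rendering. The collection name is an
 * assumption for illustration only.
 */
private[jvr] object CameraUpdaterUsageSketch {
  def syncAll( cameraUpdaters : Seq[CameraUpdater] ) {
    cameraUpdaters.foreach( _.update() )
  }
}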
|
simulator-x/jvr-rendering
|
src/simx/components/renderer/jvr/CameraUpdater.scala
|
Scala
|
apache-2.0
| 2,872 |
/**
 * FILE: ResponderActor.scala
 * PATH: /Codice/sgad/servertier/src/main/scala/sgad/servertier/presentation/httpresponder
 * CREATION DATE: 23 February 2014
 * AUTHOR: ProTech
 * EMAIL: [email protected]
 *
 * This file is the property of the ProTech group and is released under the Apache v2 license.
 *
 * CHANGE LOG:
 * 2014-02-23 - Class created - Biancucci Maurizio
*/
package sgad.servertier.presentation.httpresponder
import akka.actor.{Props, ActorLogging}
import spray.http._
import spray.routing._
import akka.pattern._
import scala.language.postfixOps
import spray.http.HttpHeaders.RawHeader
import scala.concurrent.duration._
import sgad.servertier.presentation.messages.{ToWorkerUserRequest, ToWorkerRegistrationRequest, ToWorkerLoginRequest}
import MediaTypes._
import sgad.servertier.presentation.pagemanager.PageFactory
import sgad.servertier.presentation.timeout.STimeout
import spray.util.LoggingContext
/**
 * Class that manages the end points for HTTP requests sent by the client.
 * @constructor
 * @param workingDirectory Absolute path from which the application was launched.
*/
class ResponderActor(private val workingDirectory: String) extends HttpServiceActor with ActorLogging {
/**
	 * The actor system's dispatcher, used to create and manage request timeouts.
*/
implicit def executionContext = actorRefFactory.dispatcher
/**
	 * Method for handling errors during routing.
	 * @param log Implicit reference to the log.
	 * @return The status to return to the connected client.
*/
implicit def exceptionHandler(implicit log: LoggingContext) =
ExceptionHandler {
case _: akka.pattern.AskTimeoutException => ctx =>
log.warning("{} errore mentre gestivo la richiesta: {}", "Server sovraccarico", ctx.request)
ctx.complete(StatusCodes.TooManyRequests, "Server sovraccarico")
}
/**
	 * Defines the HTTP end points and how each request is handled.
*/
val myRoute =
get {
			//Provides an endpoint for the home page, where the response is served from memory, making loading very fast.
path("") {
respondWithMediaType(`text/html`) {
complete {
PageFactory.getHomePage
}
}
} ~
			// Defines an endpoint for accessing the game site and all the files contained in the webpages folder.
pathPrefix("") {
getFromDirectory(workingDirectory + "/src/main/resources/webpages/")
} ~
			// Defines an endpoint exposing all the files contained in the game's images folder.
pathPrefix("canvas" / "images") {
getFromDirectory(workingDirectory + "/src/main/resources/canvas/images/")
} ~
pathPrefix("canvas" / "jquery") {
getFromDirectory(workingDirectory + "/src/main/resources/canvas/jquery/")
} ~
			// Defines an endpoint serving the home page when, due to some error, the client reloads the game page.
path("login") {
respondWithMediaType(`text/html`) {
complete {
PageFactory.getHomePage
}
}
} ~
path("registration") {
respondWithMediaType(`text/html`) {
complete {
PageFactory.getHomePage
}
}
}
} ~
		// Defines an end point for login requests.
post {
path("login") {
respondWithMediaType(`text/html`) {
formFields('user.as[String], 'password.as[String]) {
(user, password) =>
val request = ToWorkerLoginRequest(user, password)
complete {
actorRefFactory.actorOf(Props[WorkerActor]).ask(request)(STimeout.getHttpRequestTimeoutS seconds).mapTo[String]
}
}
}
}
} ~
		// Defines an end point for registration requests.
post {
path("registration") {
respondWithMediaType(`text/html`) {
formFields('user.as[String], 'email.as[String], 'password1.as[String], 'password2.as[String]) {
(user, email, password1, password2) =>
val request = ToWorkerRegistrationRequest(user, email, password1, password2)
complete {
actorRefFactory.actorOf(Props[WorkerActor]).ask(request)(STimeout.getHttpRequestTimeoutS seconds).mapTo[String]
}
}
}
}
} ~
		// Defines an end point for game requests.
post {
path("user") {
respondWithHeaders(RawHeader("Access-Control-Allow-Origin", "*"), RawHeader("Access-Control-Allow-Methods", "GET,HEAD,POST,OPTIONS,TRACE"),
RawHeader("Access-Control-Allow-Headers", "*, X-Requested-With, Content-Type, Accept")) {
formFields('user.as[String], 'operation.as[String], 'data.as[String]) {
(user, operation, data) =>
val request = ToWorkerUserRequest(user, operation, data)
complete {
actorRefFactory.actorOf(Props[WorkerActor]).ask(request)(STimeout.getHttpRequestTimeoutS seconds).mapTo[String]
}
}
}
}
}
/**
	 * Responds to HTTP requests using the myRoute attribute or, when needed, handles a request timeout.
*/
def receive = handleTimeouts orElse runRoute(myRoute)
/**
	 * Handles server errors that occur while building the response to an HTTP request.
*/
def handleTimeouts: Receive = {
		//Timeout case while building the response to the HTTP request.
case Timedout(x: HttpRequest) =>
sender ! HttpResponse(StatusCodes.InternalServerError, "Il server è momentaneamente sovraccarico, riprovare più tardi")
}
}
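/**
 * Hypothetical bootstrap sketch, not part of the original file: it shows how a
 * ResponderActor is typically bound to an HTTP port with spray-can. The actor
 * system name, working directory, interface and port below are assumptions for
 * illustration only.
 */
object ResponderActorBootSketch {
	import akka.actor.ActorSystem
	import akka.io.IO
	import spray.can.Http

	def main(args: Array[String]) {
		implicit val system = ActorSystem("sgad-http")
		val responder = system.actorOf(Props(classOf[ResponderActor], System.getProperty("user.dir")), "responder")
		IO(Http) ! Http.Bind(responder, interface = "localhost", port = 8080)
	}
}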
|
protechunipd/SGAD
|
Codice/sgad/servertier/src/main/scala/sgad/servertier/presentation/httpresponder/ResponderActor.scala
|
Scala
|
apache-2.0
| 5,480 |
/**
* Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.impl
/**
* Reference implementations of the authenticators.
*/
package object authenticators
|
mohiva/play-silhouette
|
silhouette/app/com/mohiva/play/silhouette/impl/authenticators/package.scala
|
Scala
|
apache-2.0
| 763 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.entity
import java.time.Instant
import scala.concurrent.Future
import scala.util.Try
import spray.json._
import spray.json.DefaultJsonProtocol._
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.ConfigKeys
import org.apache.openwhisk.core.database.{ArtifactStore, CacheChangeNotification, DocumentFactory, StaleParameter}
import pureconfig._
/**
* A WhiskActivation provides an abstraction of the meta-data
* for a whisk action activation record.
*
* The WhiskActivation object is used as a helper to adapt objects between
 * the schema used by the database and the WhiskActivation abstraction.
*
* @param namespace the namespace for the activation
* @param name the name of the activated entity
* @param subject the subject activating the entity
* @param activationId the activation id
* @param start the start of the activation in epoch millis
* @param end the end of the activation in epoch millis
* @param cause the activation id of the activated entity that causes this activation
* @param response the activation response
* @param logs the activation logs
* @param version the semantic version (usually matches the activated entity)
* @param publish true to share the activation or false otherwise
* @param annotations the set of annotations to attribute to the activation
* @param duration of the activation in milliseconds
* @throws IllegalArgumentException if any required argument is undefined
*/
@throws[IllegalArgumentException]
case class WhiskActivation(namespace: EntityPath,
override val name: EntityName,
subject: Subject,
activationId: ActivationId,
start: Instant,
end: Instant,
cause: Option[ActivationId] = None,
response: ActivationResponse = ActivationResponse.success(),
logs: ActivationLogs = ActivationLogs(),
version: SemVer = SemVer(),
publish: Boolean = false,
annotations: Parameters = Parameters(),
duration: Option[Long] = None)
extends WhiskEntity(EntityName(activationId.asString), "activation") {
require(cause != null, "cause undefined")
require(start != null, "start undefined")
require(end != null, "end undefined")
require(response != null, "response undefined")
def toJson = WhiskActivation.serdes.write(this).asJsObject
/**
   * This is the activation summary as computed by the database view.
* Strictly used in view testing to enforce alignment.
*/
override def summaryAsJson = {
import WhiskActivation.instantSerdes
def actionOrNot() = {
if (end != Instant.EPOCH) {
Map(
"end" -> end.toJson,
"duration" -> (duration getOrElse (end.toEpochMilli - start.toEpochMilli)).toJson,
"statusCode" -> response.statusCode.toJson)
} else Map.empty
}
JsObject(
super.summaryAsJson.fields - "updated" +
("activationId" -> activationId.toJson) +
("start" -> start.toJson) ++
cause.map(("cause" -> _.toJson)) ++
actionOrNot())
}
def resultAsJson = response.result.toJson.asJsObject
def toExtendedJson = {
val JsObject(baseFields) = WhiskActivation.serdes.write(this).asJsObject
val newFields = (baseFields - "response") + ("response" -> response.toExtendedJson)
if (end != Instant.EPOCH) {
val durationValue = (duration getOrElse (end.toEpochMilli - start.toEpochMilli)).toJson
JsObject(newFields + ("duration" -> durationValue))
} else {
JsObject(newFields - "end")
}
}
def withoutLogsOrResult = {
copy(response = response.withoutResult, logs = ActivationLogs()).revision[WhiskActivation](rev)
}
def withoutLogs = copy(logs = ActivationLogs()).revision[WhiskActivation](rev)
def withLogs(logs: ActivationLogs) = copy(logs = logs).revision[WhiskActivation](rev)
}
object WhiskActivation
extends DocumentFactory[WhiskActivation]
with WhiskEntityQueries[WhiskActivation]
with DefaultJsonProtocol {
/** Some field names for annotations */
val pathAnnotation = "path"
val kindAnnotation = "kind"
val limitsAnnotation = "limits"
val topmostAnnotation = "topmost"
val causedByAnnotation = "causedBy"
val initTimeAnnotation = "initTime"
val waitTimeAnnotation = "waitTime"
val conductorAnnotation = "conductor"
/** Some field names for compositions */
val actionField = "action"
val paramsField = "params"
val stateField = "state"
val valueField = "value"
protected[entity] implicit val instantSerdes = new RootJsonFormat[Instant] {
def write(t: Instant) = t.toEpochMilli.toJson
def read(value: JsValue) =
Try {
value match {
case JsString(t) => Instant.parse(t)
case JsNumber(i) => Instant.ofEpochMilli(i.bigDecimal.longValue)
case _ => deserializationError("timestamp malformed")
}
} getOrElse deserializationError("timestamp malformed")
}
override val collectionName = "activations"
private val dbConfig = loadConfigOrThrow[DBConfig](ConfigKeys.db)
private val mainDdoc = dbConfig.activationsDdoc
private val filtersDdoc = dbConfig.activationsFilterDdoc
/** The main view for activations, keyed by namespace, sorted by date. */
override lazy val view = WhiskEntityQueries.view(mainDdoc, collectionName)
/**
* A view for activations in a namespace additionally keyed by action name
* (and package name if present) sorted by date.
*/
lazy val filtersView = WhiskEntityQueries.view(filtersDdoc, collectionName)
override implicit val serdes = jsonFormat13(WhiskActivation.apply)
// Caching activations doesn't make much sense in the common case as usually,
// an activation is only asked for once.
override val cacheEnabled = false
/**
* Queries datastore for activation records which have an entity name matching the
* given parameter.
*
   * @return list of records as JSON objects (as Left) if the docs parameter is false,
   * or a list of WhiskActivations (as Right) if the full records are included
*/
def listActivationsMatchingName(db: ArtifactStore[WhiskActivation],
namespace: EntityPath,
path: EntityPath,
skip: Int,
limit: Int,
includeDocs: Boolean = false,
since: Option[Instant] = None,
upto: Option[Instant] = None,
stale: StaleParameter = StaleParameter.No)(
implicit transid: TransactionId): Future[Either[List[JsObject], List[WhiskActivation]]] = {
import WhiskEntityQueries.TOP
val convert = if (includeDocs) Some((o: JsObject) => Try { serdes.read(o) }) else None
val startKey = List(namespace.addPath(path).asString, since map { _.toEpochMilli } getOrElse 0)
val endKey = List(namespace.addPath(path).asString, upto map { _.toEpochMilli } getOrElse TOP, TOP)
query(db, filtersView, startKey, endKey, skip, limit, reduce = false, stale, convert)
}
def put[Wsuper >: WhiskActivation](db: ArtifactStore[Wsuper], doc: WhiskActivation)(
implicit transid: TransactionId,
notifier: Option[CacheChangeNotification]): Future[DocInfo] =
//As activations are not updated we just pass None for the old document
super.put(db, doc, None)
}
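/**
 * A minimal sketch, not part of the original file, of the timestamp convention
 * implemented by `instantSerdes` above: Instants are written as epoch
 * milliseconds and read back from either a millisecond number or an ISO-8601
 * string. The object and value names are illustrative only.
 */
private object TimestampConventionSketch {
  import WhiskActivation.instantSerdes

  val start: Instant = Instant.ofEpochMilli(1500000000000L) // 2017-07-14T02:40:00Z
  val written: JsValue = start.toJson // JsNumber(1500000000000)
  val fromNumber: Instant = written.convertTo[Instant] // the same Instant again
  val fromString: Instant = JsString("2017-07-14T02:40:00Z").convertTo[Instant]
}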
|
starpit/openwhisk
|
common/scala/src/main/scala/org/apache/openwhisk/core/entity/WhiskActivation.scala
|
Scala
|
apache-2.0
| 8,468 |
package com.eevolution.context.dictionary.infrastructure.repository
import java.util.UUID
import com.eevolution.context.dictionary.domain._
import com.eevolution.context.dictionary.domain.model.WorkflowNextCondition
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
import com.eevolution.utils.PaginatedSequence
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcSession
import scala.concurrent.{ExecutionContext, Future}
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/e-Evolution
* Created by [email protected] , www.e-evolution.com
*/
/**
* Workflow Next Condition Repository
* @param session
* @param executionContext
*/
class WorkflowNextConditionRepository (session: JdbcSession)(implicit executionContext: ExecutionContext)
extends api.repository.WorkflowNextConditionRepository[WorkflowNextCondition , Int]
with WorkflowNextConditionMapping {
def getById(id: Int): Future[WorkflowNextCondition] = {
Future(run(queryWorkflowNextCondition.filter(_.workflowNextConditionId == lift(id))).headOption.get)
}
def getByUUID(uuid: UUID): Future[WorkflowNextCondition] = {
Future(run(queryWorkflowNextCondition.filter(_.uuid == lift(uuid.toString))).headOption.get)
}
def getByWorkflowNextConditionId(id : Int) : Future[List[WorkflowNextCondition]] = {
Future(run(queryWorkflowNextCondition))
}
def getAll() : Future[List[WorkflowNextCondition]] = {
Future(run(queryWorkflowNextCondition))
}
def getAllByPage(page: Int, pageSize: Int): Future[PaginatedSequence[WorkflowNextCondition]] = {
val offset = page * pageSize
val limit = (page + 1) * pageSize
for {
count <- countWorkflowNextCondition()
elements <- if (offset > count) Future.successful(Nil)
else selectWorkflowNextCondition(offset, limit)
} yield {
PaginatedSequence(elements, page, pageSize, count)
}
}
private def countWorkflowNextCondition() = {
Future(run(queryWorkflowNextCondition.size).toInt)
}
private def selectWorkflowNextCondition(offset: Int, limit: Int): Future[Seq[WorkflowNextCondition]] = {
    // `limit` is an absolute end index here, so only (limit - offset) rows belong to the requested page
    Future(run(queryWorkflowNextCondition).drop(offset).take(limit - offset).toSeq)
}
}
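/**
 * Hypothetical usage sketch, not part of the original file: fetching the first
 * page of workflow next conditions. The page and pageSize values are assumptions
 * for illustration only.
 */
object WorkflowNextConditionPagingSketch {
  def firstPage(repository: WorkflowNextConditionRepository): Future[PaginatedSequence[WorkflowNextCondition]] =
    repository.getAllByPage(page = 0, pageSize = 20)
}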
|
adempiere/ADReactiveSystem
|
dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/WorkflowNextConditionRepository.scala
|
Scala
|
gpl-3.0
| 2,991 |
package edu.holycross.shot.ohco2
import edu.holycross.shot.cite._
/** Association of a CtsUrn with labelling information
* from a text catalog.
*
* @param urn The CtsUrn.
* @param label A label for it.
*/
case class LabelledCtsUrn(urn: CtsUrn, label: String) {}
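/** A hypothetical usage sketch, not part of the original file: pairing a CTS URN
 * with a human-readable label as it might appear in a text catalog. The URN and
 * label values are illustrative only.
 */
object LabelledCtsUrnSketch {
  val iliadOpening: LabelledCtsUrn = LabelledCtsUrn(
    CtsUrn("urn:cts:greekLit:tlg0012.tlg001.msA:1.1"),
    "Iliad 1.1 (Venetus A)"
  )
}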
|
cite-architecture/ohco2
|
shared/src/main/scala/edu/holycross/shot/ohco2/LabelledCtsUrn.scala
|
Scala
|
gpl-3.0
| 263 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2016 by Lloyd Chan
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
* This is from: https://github.com/lloydmeta/enumeratum
*/
package fm.common
import scala.collection.immutable._
import scala.language.experimental.macros
/**
* Base trait for a Value-based enums.
*
* Example:
*
* {{{
* scala> sealed abstract class Greeting(val value: Int) extends IntEnumEntry
*
* scala> object Greeting extends IntEnum[Greeting] {
* | val values = findValues
* | case object Hello extends Greeting(1)
* | case object GoodBye extends Greeting(2)
* | case object Hi extends Greeting(3)
* | case object Bye extends Greeting(4)
* | }
*
* scala> Greeting.withValueOpt(1)
* res0: Option[Greeting] = Some(Hello)
*
* scala> Greeting.withValueOpt(6)
* res1: Option[Greeting] = None
* }}}
*/
sealed trait ValueEnum[ValueType, EntryType <: ValueEnumEntry[ValueType]] {
/**
* Map of [[ValueType]] to [[EntryType]] members
*/
final lazy val valuesToEntriesMap: Map[ValueType, EntryType] =
values.map(v => v.value -> v).toMap
/**
* The sequence of values for your [[Enum]]. You will typically want
* to implement this in your extending class as a `val` so that `withValue`
* and friends are as efficient as possible.
*
* Feel free to implement this however you'd like (including messing around with ordering, etc) if that
* fits your needs better.
*/
def values: IndexedSeq[EntryType]
/**
* Tries to get an [[EntryType]] by the supplied value. The value corresponds to the .value
* of the case objects implementing [[EntryType]]
*
* Like [[Enumeration]]'s `withValue`, this method will throw if the value does not match any of the values'
* `.value` values.
*/
@SuppressWarnings(Array("org.wartremover.warts.Throw"))
def withValue(i: ValueType): EntryType =
withValueOpt(i).getOrElse(throw new NoSuchElementException(buildNotFoundMessage(i)))
/**
* Optionally returns an [[EntryType]] for a given value.
*/
def withValueOpt(i: ValueType): Option[EntryType] = valuesToEntriesMap.get(i)
private lazy val existingEntriesString = values.map(_.value).mkString(", ")
private def buildNotFoundMessage(i: ValueType): String = {
s"$i is not a member of ValueEnum ($existingEntriesString)"
}
}
/*
 * For the sake of keeping implementations of ValueEnums constrained to a subset that we have tested to work relatively well,
* the following traits are implementations of the sealed trait.
*
* There is a bit of repetition in order to supply the findValues method (esp in the comments) because we are using a macro
* and macro invocations cannot provide implementations for a super class's abstract method
*/
object IntEnum {
/**
* Materializes an IntEnum for a given IntEnumEntry
*/
implicit def materialiseIntValueEnum[EntryType <: IntEnumEntry]: IntEnum[EntryType] =
macro EnumMacros.materializeEnumImpl[EntryType]
}
/**
* Value enum with [[IntEnumEntry]] entries
*/
trait IntEnum[A <: IntEnumEntry] extends ValueEnum[Int, A] {
/**
* Method that returns a Seq of [[A]] objects that the macro was able to find.
*
* You will want to use this in some way to implement your [[values]] method. In fact,
* if you aren't using this method...why are you even bothering with this lib?
*/
protected def findValues: IndexedSeq[A] = macro ValueEnumMacros.findIntValueEntriesImpl[A]
}
object LongEnum {
/**
   * Materializes a LongEnum for an in-scope LongEnumEntry
*/
implicit def materialiseLongValueEnum[EntryType <: LongEnumEntry]: LongEnum[EntryType] =
macro EnumMacros.materializeEnumImpl[EntryType]
}
/**
* Value enum with [[LongEnumEntry]] entries
*/
trait LongEnum[A <: LongEnumEntry] extends ValueEnum[Long, A] {
/**
* Method that returns a Seq of [[A]] objects that the macro was able to find.
*
* You will want to use this in some way to implement your [[values]] method. In fact,
* if you aren't using this method...why are you even bothering with this lib?
*/
final protected def findValues: IndexedSeq[A] = macro ValueEnumMacros.findLongValueEntriesImpl[A]
}
object ShortEnum {
/**
* Materializes a ShortEnum for an in-scope ShortEnumEntry
*/
implicit def materialiseShortValueEnum[EntryType <: ShortEnumEntry]: ShortEnum[EntryType] =
macro EnumMacros.materializeEnumImpl[EntryType]
}
/**
* Value enum with [[ShortEnumEntry]] entries
*/
trait ShortEnum[A <: ShortEnumEntry] extends ValueEnum[Short, A] {
/**
* Method that returns a Seq of [[A]] objects that the macro was able to find.
*
* You will want to use this in some way to implement your [[values]] method. In fact,
* if you aren't using this method...why are you even bothering with this lib?
*/
final protected def findValues: IndexedSeq[A] =
macro ValueEnumMacros.findShortValueEntriesImpl[A]
}
object StringEnum {
/**
* Materializes a StringEnum for an in-scope StringEnumEntry
*/
implicit def materialiseStringValueEnum[EntryType <: StringEnumEntry]: StringEnum[EntryType] =
macro EnumMacros.materializeEnumImpl[EntryType]
}
/**
* Value enum with [[StringEnumEntry]] entries
*
* This is similar to [[enumeratum.Enum]], but different in that values must be
* literal values. This restraint allows us to enforce uniqueness at compile time.
*
* Note that uniqueness is only guaranteed if you do not do any runtime string manipulation on values.
*/
trait StringEnum[A <: StringEnumEntry] extends ValueEnum[String, A] {
/**
* Method that returns a Seq of [[A]] objects that the macro was able to find.
*
* You will want to use this in some way to implement your [[values]] method. In fact,
* if you aren't using this method...why are you even bothering with this lib?
*/
final protected def findValues: IndexedSeq[A] =
macro ValueEnumMacros.findStringValueEntriesImpl[A]
}
object ByteEnum {
/**
* Materializes a ByteEnum for an in-scope ByteEnumEntry
*/
implicit def materialiseByteValueEnum[EntryType <: ByteEnumEntry]: ByteEnum[EntryType] =
macro EnumMacros.materializeEnumImpl[EntryType]
}
/**
* Value enum with [[ByteEnumEntry]] entries
*
* This is similar to [[enumeratum.Enum]], but different in that values must be
* literal values. This restraint allows us to enforce uniqueness at compile time.
*
* Note that uniqueness is only guaranteed if you do not do any runtime string manipulation on values.
*/
trait ByteEnum[A <: ByteEnumEntry] extends ValueEnum[Byte, A] {
/**
* Method that returns a Seq of [[A]] objects that the macro was able to find.
*
* You will want to use this in some way to implement your [[values]] method. In fact,
* if you aren't using this method...why are you even bothering with this lib?
*/
final protected def findValues: IndexedSeq[A] = macro ValueEnumMacros.findByteValueEntriesImpl[A]
}
object CharEnum {
/**
* Materializes a CharEnum for an in-scope CharEnumEntry
*/
implicit def materialiseCharValueEnum[EntryType <: CharEnumEntry]: CharEnum[EntryType] =
macro EnumMacros.materializeEnumImpl[EntryType]
}
/**
* Value enum with [[CharEnumEntry]] entries
*
* This is similar to [[enumeratum.Enum]], but different in that values must be
* literal values. This restraint allows us to enforce uniqueness at compile time.
*
* Note that uniqueness is only guaranteed if you do not do any runtime string manipulation on values.
*/
trait CharEnum[A <: CharEnumEntry] extends ValueEnum[Char, A] {
/**
* Method that returns a Seq of [[A]] objects that the macro was able to find.
*
* You will want to use this in some way to implement your [[values]] method. In fact,
* if you aren't using this method...why are you even bothering with this lib?
*/
final protected def findValues: IndexedSeq[A] = macro ValueEnumMacros.findCharValueEntriesImpl[A]
}
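/**
 * A hypothetical example, not part of the original file, showing the intended
 * usage of [[StringEnum]]: values must be literal strings, which lets the
 * findValues macro verify uniqueness at compile time.
 */
object HttpMethodExample {
  sealed abstract class HttpMethod(val value: String) extends StringEnumEntry

  object HttpMethod extends StringEnum[HttpMethod] {
    val values = findValues
    case object Get extends HttpMethod("GET")
    case object Post extends HttpMethod("POST")
  }

  val get: Option[HttpMethod] = HttpMethod.withValueOpt("GET") // Some(Get)
}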
|
frugalmechanic/fm-common
|
shared/src/main/scala/fm/common/ValueEnum.scala
|
Scala
|
apache-2.0
| 9,003 |
/*
* Copyright (C)2014 D. Plaindoux.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
package smallibs.rapido.page
import scala.Some
import smallibs.page.{Provider, DataProvider}
import smallibs.rapido.lang.ast._
trait AbstractProvider {
self: DataProvider =>
val keys: List[String]
def values: List[DataProvider] =
for (n <- keys if self.get(n) != None) yield self.get(n).get
def set(name: String, data: DataProvider): DataProvider = throw new IllegalAccessException
}
class EntitiesProvider(elements: List[Entity]) extends DataProvider with AbstractProvider {
val keys = List("services", "routes", "clients", "types")
def get(name: String): Option[DataProvider] = {
val types = {
for (e <- elements if e.isInstanceOf[TypeEntity]) yield {
val entity = e.asInstanceOf[TypeEntity]
(entity.name, entity.definition)
}
}.toMap
name match {
case "services" =>
Some(Provider.set(
for (e <- elements if e.isInstanceOf[ServiceEntity])
yield {
val service = e.asInstanceOf[ServiceEntity]
new ServiceProvider(service, new RouteProvider(service.route, types), types)
})
)
case "routes" =>
Some(Provider.set(
for (e <- elements if e.isInstanceOf[ServiceEntity])
yield new RouteProvider(e.asInstanceOf[ServiceEntity].route, types))
)
case "clients" =>
Some(Provider.set(
for (e <- elements if e.isInstanceOf[ClientEntity])
yield new ClientProvider(e.asInstanceOf[ClientEntity], this))
)
case "types" =>
Some(Provider.set(
for (e <- elements if e.isInstanceOf[TypeEntity])
yield new TypeDefinitionProvider(e.asInstanceOf[TypeEntity], types.toMap))
)
case _ => None
}
}
}
class ServiceProvider(service: ServiceEntity, route: DataProvider, types: Map[String, Type]) extends DataProvider with AbstractProvider {
val keys = List("name", "entries", "route")
def get(name: String): Option[DataProvider] =
name match {
case "name" => Some(Provider.constant(service.name))
case "entries" => Some(Provider.set(for (entry <- service.entries) yield new EntryProvider(entry, types)))
case "route" => Some(route)
case _ => None
}
}
class RouteProvider(route: Route, types: Map[String, Type]) extends DataProvider with AbstractProvider {
val keys = List("name", "params", "path")
def get(name: String): Option[DataProvider] =
name match {
case "name" => Some(Provider.constant(route.name))
case "params" =>
val params = route.params.foldLeft[(Int, List[(Int, Type)])](0, Nil)((i, t) => (i._1 + 1, i._2 ++ List((i._1, t))))
Some(Provider.set(for ((i, e) <- params._2) yield new ParamProvider((f"sp_$i%d", e), types)))
case "path" => Some(new PathProvider(route.path))
case _ => None
}
}
class ParamProvider(param: (String, Type), types: Map[String, Type]) extends DataProvider with AbstractProvider {
val keys = List("name", "type")
def get(name: String): Option[DataProvider] =
name match {
case "name" => Some(Provider.constant(param._1))
case "type" => Some(new TypeProvider(param._2, types))
case _ => None
}
}
class PathProvider(path: Path) extends DataProvider with AbstractProvider {
val keys = List("values")
def get(name: String): Option[DataProvider] =
name match {
case "values" =>
Some(Provider.set(
for (p <- path.values)
yield p match {
case s@StaticLevel(_) => new StaticPathProvider(s)
case d@DynamicLevel(_) => new DynamicPathProvider(d)
}
))
case _ => None
}
}
class StaticPathProvider(path: StaticLevel) extends DataProvider with AbstractProvider {
val keys = List("name", "type")
def get(name: String): Option[DataProvider] =
name match {
case "name" => Some(Provider.constant(path.name))
case _ => None
}
}
class DynamicPathProvider(param: DynamicLevel) extends DataProvider with AbstractProvider {
val keys = List("name", "type")
def get(name: String): Option[DataProvider] =
name match {
case "object" => Some(Provider.constant(param.values.head))
case "fields" => Some(Provider.set(for (param <- param.values.tail) yield Provider.constant(param)))
case _ => None
}
}
class ClientProvider(client: ClientEntity, entities: EntitiesProvider) extends DataProvider with AbstractProvider {
val keys = List("name", "provides")
def get(name: String): Option[DataProvider] =
name match {
case "name" => Some(Provider.constant(client.name))
case "provides" =>
Some(Provider.set(
for (name <- client.provides;
service <- entities.get("services").get.values
if service.get("name").get.toString.equals(name))
yield service)
)
case _ => None
}
}
class TypeDefinitionProvider(kind: TypeEntity, types: Map[String, Type]) extends DataProvider with AbstractProvider {
val keys = List("name", "definition")
def get(name: String): Option[DataProvider] =
name match {
case "name" => Some(Provider.constant(kind.name))
case "definition" => Some(new TypeProvider(kind.definition, types))
case _ => None
}
}
case class TypeProvider(aType: Type, types: Map[String, Type]) extends DataProvider with AbstractProvider {
val keys = List("name", "bool", "int", "string", "opt", "array", "object")
def deref(t: Type): Option[Type] =
t match {
case TypeIdentifier(n) =>
(types get n) flatMap deref
case TypeComposed(l, r) =>
(deref(l), deref(r)) match {
case (Some(TypeObject(l)), Some(TypeObject(r))) => Some(TypeObject(l ++ r))
case _ => None
}
case _ => Some(t)
}
def get(name: String): Option[DataProvider] =
(name, aType, deref(aType)) match {
case ("name", TypeIdentifier(name), _) => Some(Provider.constant(name))
case ("bool", _, Some(TypeBoolean)) => Some(Provider.constant("bool"))
case ("int", _, Some(TypeNumber)) => Some(Provider.constant("int"))
case ("string", _, Some(TypeString)) => Some(Provider.constant("string"))
case ("opt", _, Some(TypeOptional(t))) => Some(new TypeProvider(t, types))
case ("array", _, Some(TypeMultiple(t))) => Some(new TypeProvider(t, types))
case ("object", _, Some(TypeObject(values))) => Some(new TypeObjectProvider(values, types))
case _ => None
}
}
case class TypeObjectProvider(definitions: Map[String, TypeAttribute], types: Map[String, Type]) extends DataProvider with AbstractProvider {
val keys = List("attributes", "virtual")
def get(name: String): Option[DataProvider] =
name match {
case "attributes" =>
val concrete = for ((n, t) <- definitions if t.isInstanceOf[ConcreteTypeAttribute]) yield (n, t)
val attributes = for ((n, ConcreteTypeAttribute(a, t)) <- concrete) yield new TypeAttributeProvider(n, a, t, types)
Some(Provider.set(attributes.toList))
case "virtual" =>
val virtual = for ((n, t) <- definitions if t.isInstanceOf[VirtualTypeAttribute]) yield (n, t)
val attributes = for ((n, VirtualTypeAttribute(p)) <- virtual) yield new TypeVirtualAttributeProvider(n, p)
Some(Provider.set(attributes.toList))
case _ => None
}
}
class TypeAttributeProvider(aName: String, access: Option[Access], aType: Type, types: Map[String, Type]) extends DataProvider with AbstractProvider {
val keys = List("name", "type")
def get(name: String): Option[DataProvider] =
name match {
case "name" => Some(Provider.constant(aName))
case "get" => access flatMap {
case GetAccess(n) => Some(Provider.constant(n.getOrElse(aName)))
case _ => None
}
case "set" => access flatMap {
case SetAccess(n) => Some(Provider.constant(n.getOrElse(aName)))
case _ => None
}
case "set_get" => access flatMap {
case SetGetAccess(n) => Some(Provider.constant(n.getOrElse(aName)))
case _ => None
}
case "type" => Some(new TypeProvider(aType, types))
case _ => None
}
}
class TypeVirtualAttributeProvider(aName: String, path: Path) extends DataProvider with AbstractProvider {
val keys = List("name", "values")
def get(name: String): Option[DataProvider] =
name match {
case "name" => Some(Provider.constant(aName))
case "values" => new PathProvider(path) get "values"
case _ => None
}
}
class EntryProvider(entry: Service, types: Map[String, Type]) extends DataProvider with AbstractProvider {
val keys = List("name", "operation", "signature", "path", "params", "body", "header")
def get(name: String): Option[DataProvider] =
name match {
case "name" => Some(Provider.constant(entry.name))
case "operation" => Some(Provider.constant(entry.action.operation.toString))
case "signature" => Some(new ServiceTypeProvider(entry.signature, types))
case "path" => for (p <- entry.action.path) yield new PathProvider(p)
case "params" => for (b <- entry.action.params) yield TypeProvider(b, types)
case "body" => for (b <- entry.action.body) yield TypeProvider(b, types)
case "header" => for (b <- entry.action.header) yield TypeProvider(b, types)
case _ => None
}
}
class ServiceTypeProvider(serviceType: ServiceType, types: Map[String, Type]) extends DataProvider with AbstractProvider {
val keys = List()
def get(name: String): Option[DataProvider] =
name match {
case "inputs" =>
val params = serviceType.inputs.foldLeft[(Int, List[(Int, Type)])](0, Nil)((i, t) => (i._1 + 1, i._2 ++ List((i._1, t))))
Some(Provider.set(for ((i, e) <- params._2) yield new ParamProvider((f"fp_$i%d", e), types)))
case "output" => Some(TypeProvider(serviceType.output, types))
case _ => None
}
}
//
// Main provider entry point
//
object RapidoProvider {
def entities(elements: List[Entity]): DataProvider = new EntitiesProvider(elements)
}
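/**
 * Hypothetical usage sketch, not part of the original file: wrapping a parsed
 * entity list and looking up the per-service providers. The `entities` argument
 * is assumed to come from the Rapido parser.
 */
object RapidoProviderUsageSketch {
  def serviceProviders(entities: List[Entity]): Option[DataProvider] =
    RapidoProvider.entities(entities).get("services")
}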
|
d-plaindoux/rapido
|
src/main/scala/smallibs/rapido/page/provider.scala
|
Scala
|
lgpl-2.1
| 10,846 |
/**
* Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.impl.providers.openid
import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.impl.providers._
import play.api.test.WithApplication
/**
* Test case for the [[YahooProvider]] class.
*/
class YahooProviderSpec extends OpenIDProviderSpec {
"The `withSettings` method" should {
"create a new instance with customized settings" in new WithApplication with Context {
val overrideSettingsFunction: OpenIDSettings => OpenIDSettings = { s =>
s.copy("new-provider-url")
}
val s = provider.withSettings(overrideSettingsFunction)
s.settings.providerURL must be equalTo "new-provider-url"
there was one(openIDService).withSettings(overrideSettingsFunction)
}
}
"The `retrieveProfile` method" should {
"return the social profile" in new WithApplication with Context {
profile(provider.retrieveProfile(openIDInfo)) {
case p =>
p must be equalTo new CommonSocialProfile(
loginInfo = LoginInfo(provider.id, "https://me.yahoo.com/a/Xs6hPjazdrMvmbn4jhQjkjkhcasdGdsKajq9we"),
fullName = Some("Apollonia Vanova"),
email = Some("[email protected]"),
avatarURL = Some("https://s.yimg.com/dh/ap/social/profile/profile_b48.png")
)
}
}
}
/**
* Defines the context for the abstract OpenID provider spec.
*
* @return The Context to use for the abstract OpenID provider spec.
*/
override protected def context: OpenIDProviderSpecContext = new Context {}
/**
* The context.
*/
trait Context extends OpenIDProviderSpecContext {
/**
     * An OpenID info.
*/
override lazy val openIDInfo = OpenIDInfo("https://me.yahoo.com/a/Xs6hPjazdrMvmbn4jhQjkjkhcasdGdsKajq9we", Map(
"fullname" -> "Apollonia Vanova",
"email" -> "[email protected]",
"image" -> "https://s.yimg.com/dh/ap/social/profile/profile_b48.png"
))
/**
* The OpenID settings.
*/
lazy val openIDSettings = spy(OpenIDSettings(
providerURL = "https://me.yahoo.com/",
callbackURL = "http://localhost:9000/authenticate/yahoo",
axRequired = Map(
"fullname" -> "http://axschema.org/namePerson",
"email" -> "http://axschema.org/contact/email",
"image" -> "http://axschema.org/media/image/default"
),
realm = Some("http://localhost:9000")
))
/**
* The provider to test.
*/
lazy val provider = new YahooProvider(httpLayer, openIDService, openIDSettings)
}
}
|
mohiva/play-silhouette
|
silhouette/test/com/mohiva/play/silhouette/impl/providers/openid/YahooProviderSpec.scala
|
Scala
|
apache-2.0
| 3,211 |
def sequence[A](fas: List[F[A]]): F[List[A]] =
traverse(fas)(fa => fa)
def replicateM[A](n: Int, fa: F[A]): F[List[A]] =
sequence(List.fill(n)(fa))
def product[A,B](fa: F[A], fb: F[B]): F[(A,B)] =
map2(fa, fb)((_,_))
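// A hypothetical illustration, not part of the answer key: with F = Option,
//   sequence(List(Some(1), Some(2)))  == Some(List(1, 2))
//   sequence(List(Some(1), None))     == None
//   replicateM(3, Some("a"))          == Some(List("a", "a", "a"))
//   product(Some(1), Some("a"))       == Some((1, "a"))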
|
lucaviolanti/scala-redbook
|
answerkey/applicative/01.answer.scala
|
Scala
|
mit
| 231 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dbis.pig.op
import java.security.MessageDigest
import dbis.pig.plan.InvalidPlanException
import dbis.pig.schema._
import org.kiama.rewriting.Rewritable
import scala.collection.immutable.Seq
import scala.collection.mutable.Map
import dbis.pig.expr.NamedField
import dbis.pig.expr.Ref
/**
* PigOperator is the base trait for all Pig operators. An operator contains
* pipes representing the input and output connections to other operators in the
* dataflow.
*/
abstract class PigOperator(
private[op] var _outputs: List[Pipe],
private[op] var _inputs: List[Pipe],
var schema: Option[Schema] = None
) extends Rewritable with Serializable {
def this(out: Pipe) = this(List(out), List(), None)
def this(out: Pipe, in: Pipe) = this(List(out), List(in), None)
/**
* A map of key-value pairs representing operator-specific parameters.
*/
var configParams: Map[String, Any] = Map()
/**
* The (optional) schema describing the output produced by the operator.
*/
// var schema: Option[Schema] = None
/**
* Getter method for the output pipes.
*
* @return the list of output pipes
*/
def outputs = _outputs
/**
* Setter method for the output pipes. It ensures
   * that this operator is the producer of all pipes.
*
* @param o the new list of output pipes
*/
def outputs_=(o: List[Pipe]) {
_outputs = o
// 1. make sure we don't have multiple pipes with the same name
if (_outputs.map(p => p.name).distinct.size != _outputs.size)
throw InvalidPlanException("duplicate pipe names")
// 2. make sure that we are producer in all pipes
_outputs.foreach(p => {
p.producer = this
p.consumer.foreach(_.inputs.foreach(
_.producer = this
))
})
}
/**
* Getter method for the input pipes.
*
* @return the list of input pipes
*/
def inputs = _inputs
/**
* Setter method for the input pipes. It ensures
   * that this operator is a consumer in all pipes.
*
* @param i the new list of input pipes
*/
def inputs_=(i: List[Pipe]) = {
_inputs = i
// make sure that we are consumer in all pipes
_inputs.foreach(p => if (!p.consumer.contains(this)) p.consumer = p.consumer :+ this)
}
def outPipeName: String = if (outputs.nonEmpty) outputs.head.name else ""
def outPipeNames: List[String] = outputs.map(p => p.name)
def inPipeName: String = if (inputs.nonEmpty) inputs.head.name else ""
def inPipeNames: List[String] = inputs.map(p => p.name)
/**
* Checks whether the pipe names are valid identifiers. If not an exception is raised.
*/
def checkPipeNames: Unit = {
    def validPipeName(s: String) = if (!s.matches("""[a-zA-Z_]\w*""")) throw InvalidPipeNameException(s)
outputs.foreach(p => validPipeName(p.name))
inputs.foreach(p => validPipeName(p.name))
}
def inputSchema = if (inputs.nonEmpty) inputs.head.inputSchema else None
def preparePlan: Unit = {}
/**
* Try to replace all pipes/references with a leading $ via the mapping table.
*
* @param mapping a map from identifiers to values
*/
def resolveParameters(mapping: Map[String, Ref]): Unit = {
def rename(p: Pipe): Unit = {
if (p.name.startsWith("$") && mapping.contains(p.name)) {
val s2 = mapping(p.name) match {
case NamedField(n, _) => n
case _ => p.name
}
p.name = s2
}
}
/*
* We resolve only the pipe names here.
*/
outputs.foreach(p => rename(p))
inputs.foreach(p => rename(p))
/*
* This method has to be overriden by the subclasses.
*/
resolveReferences(mapping)
}
/**
* Try to replace all references in expressions with a leading $ via the mapping table.
*
* @param mapping a map from identifiers to values
*/
def resolveReferences(mapping: Map[String, Ref]): Unit = {}
def checkConnectivity: Boolean = true
/**
* Add an operator as a consumer to the output pipe with the given name.
*
* @param name the name of the output pipe
* @param op the operator instance
*/
def addConsumer(name: String, op: PigOperator): Unit = {
_outputs.find(_.name == name) match {
case Some(p) => if (!p.consumer.contains(op)) p.consumer = p.consumer :+ op
case None => {}
}
}
/**
* Constructs the output schema of this operator based on the input + the semantics of the operator.
* The default implementation is to simply take over the schema of the input operator.
*
* @return the output schema
*/
def constructSchema: Option[Schema] = {
if (inputs.nonEmpty) {
schema = inputs.head.producer.schema
}
schema
}
/**
* Returns a string representation of the output schema of the operator.
*
* @return a string describing the schema
*/
def schemaToString: String = {
/*
* schemaToString is mainly called from DESCRIBE. Thus, we can take outPipeName as relation name.
*/
schema match {
case Some(s) => s"$outPipeName: ${s.element.descriptionString}"
case None => s"Schema for '$outPipeName' unknown."
}
}
/**
* A helper function for traversing expression trees:
*
   * Checks whether the (named) fields referenced in the expression (if any) conform to
   * the schema. Should be overridden in operators changing the schema by invoking
   * traverse with one of the traverser functions.
*
* @return true if valid field references, otherwise false
*/
def checkSchemaConformance: Boolean = true
/**
   * Returns an MD5 hash string representing the sub-plan producing the input for this operator.
*
* @return the MD5 hash string
*/
def lineageSignature: String = {
val digest = MessageDigest.getInstance("MD5")
digest.digest(lineageString.getBytes).map("%02x".format(_)).mkString
}
/**
* Returns the lineage string describing the sub-plan producing the input for this operator.
*
* @return a string representation of the sub-plan.
*/
def lineageString: String = {
inputs.map(p => p.producer.lineageString).mkString("%")
}
/**
* Check whether the input and output pipes are still consistent, i.e.
* for all output pipes the producer is the current operator and the current
* operator is also a consumer in each input pipe.
*
* @return true if the operator pipes are consistent
*/
def checkConsistency: Boolean = {
outputs.forall(p => p.producer == this) && inputs.forall(p => p.consumer.contains(this))
}
/**
   * Returns the arity of the operator, i.e. the total number of consumers
   * attached to its output pipes.
*
* @return the arity of the operator
*/
def arity = {
var numConsumers = 0
this.outputs.foreach(p => numConsumers += p.consumer.length)
numConsumers
}
def deconstruct: List[PigOperator] = this.outputs.flatMap(_.consumer)
def reconstruct(outputs: Seq[Any]): PigOperator = {
val outname = this.outPipeName
reconstruct(outputs, outname)
}
  /** Implementation for kiama's Rewritable trait
*
* It's necessary to set the `outputs` attribute on this object to List.empty, which makes `this.outPipeName`
* return "". To work around this, the output name can be provided via `outname`.
*
* @param outputs
* @param outname The output name of this relation
* @return
*/
def reconstruct(outputs: Seq[Any], outname: String): PigOperator = {
this.outputs = List.empty
outputs.foreach {
case op: PigOperator =>
val idx = this.outputs.indexWhere(_.name == outname)
if (idx > -1) {
// There is already a pipe to `outname`
this.outputs(idx).consumer = this.outputs(idx).consumer :+ op
} else {
this.outputs = this.outputs :+ Pipe(outname, this, List(op))
}
// Some rewriting rules turn one operator into multiple ones, for example Split Into into multiple Filter
// operators
case ops: Seq[_] => this.reconstruct(ops, outname)
case (op : PigOperator, _) => this.reconstruct(List(op), outname)
case _ => illegalArgs("PigOperator", "PigOperator", outputs)
}
this
}
/**
* Returns a string of whitespaces for indenting a line by the given number.
*
* @param tab number of tabs to indent
* @return a string with whitespaces
*/
def indent(tab: Int): String = new String((for (i <-1 to tab) yield ' ').toArray)
/**
   * Prints a description of the operator to standard output, indented by the given
   * number of characters.
   * Note this method is used to pretty print an execution plan.
*
* @param tab the number of characters for indenting the output
*/
def printOperator(tab: Int): Unit = {
println(indent(tab) + this.toString + s" { out = ${outPipeNames.mkString(",")} , in = ${inPipeNames.mkString(",")} }")
}
}
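/**
 * A minimal sketch, not part of the original file, of the lineage-signature
 * convention used by `lineageSignature` above: the MD5 digest of the lineage
 * string, rendered as a lowercase hex string.
 */
private object LineageSignatureSketch {
  def md5Hex(lineage: String): String =
    MessageDigest.getInstance("MD5")
      .digest(lineage.getBytes)
      .map("%02x".format(_))
      .mkString
}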
|
ksattler/piglet
|
src/main/scala/dbis/pig/op/PigOperator.scala
|
Scala
|
apache-2.0
| 9,634 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
class JoinHintSuite extends PlanTest with SharedSQLContext {
import testImplicits._
lazy val df = spark.range(10)
lazy val df1 = df.selectExpr("id as a1", "id as a2")
lazy val df2 = df.selectExpr("id as b1", "id as b2")
lazy val df3 = df.selectExpr("id as c1", "id as c2")
def verifyJoinHint(df: DataFrame, expectedHints: Seq[JoinHint]): Unit = {
val optimized = df.queryExecution.optimizedPlan
val joinHints = optimized collect {
case Join(_, _, _, _, hint) => hint
case _: ResolvedHint => fail("ResolvedHint should not appear after optimize.")
}
assert(joinHints == expectedHints)
}
test("single join") {
verifyJoinHint(
df.hint("broadcast").join(df, "id"),
JoinHint(
Some(HintInfo(broadcast = true)),
None) :: Nil
)
verifyJoinHint(
df.join(df.hint("broadcast"), "id"),
JoinHint(
None,
Some(HintInfo(broadcast = true))) :: Nil
)
}
test("multiple joins") {
verifyJoinHint(
df1.join(df2.hint("broadcast").join(df3, 'b1 === 'c1).hint("broadcast"), 'a1 === 'c1),
JoinHint(
None,
Some(HintInfo(broadcast = true))) ::
JoinHint(
Some(HintInfo(broadcast = true)),
None) :: Nil
)
verifyJoinHint(
df1.hint("broadcast").join(df2, 'a1 === 'b1).hint("broadcast").join(df3, 'a1 === 'c1),
JoinHint(
Some(HintInfo(broadcast = true)),
None) ::
JoinHint(
Some(HintInfo(broadcast = true)),
None) :: Nil
)
}
test("hint scope") {
withTempView("a", "b", "c") {
df1.createOrReplaceTempView("a")
df2.createOrReplaceTempView("b")
verifyJoinHint(
sql(
"""
|select /*+ broadcast(a, b)*/ * from (
| select /*+ broadcast(b)*/ * from a join b on a.a1 = b.b1
|) a join (
| select /*+ broadcast(a)*/ * from a join b on a.a1 = b.b1
|) b on a.a1 = b.b1
""".stripMargin),
JoinHint(
Some(HintInfo(broadcast = true)),
Some(HintInfo(broadcast = true))) ::
JoinHint(
None,
Some(HintInfo(broadcast = true))) ::
JoinHint(
Some(HintInfo(broadcast = true)),
None) :: Nil
)
}
}
test("hints prevent join reorder") {
withSQLConf(SQLConf.CBO_ENABLED.key -> "true", SQLConf.JOIN_REORDER_ENABLED.key -> "true") {
withTempView("a", "b", "c") {
df1.createOrReplaceTempView("a")
df2.createOrReplaceTempView("b")
df3.createOrReplaceTempView("c")
verifyJoinHint(
sql("select /*+ broadcast(a, c)*/ * from a, b, c " +
"where a.a1 = b.b1 and b.b1 = c.c1"),
JoinHint(
None,
Some(HintInfo(broadcast = true))) ::
JoinHint(
Some(HintInfo(broadcast = true)),
None) :: Nil
)
verifyJoinHint(
sql("select /*+ broadcast(a, c)*/ * from a, c, b " +
"where a.a1 = b.b1 and b.b1 = c.c1"),
JoinHint.NONE ::
JoinHint(
Some(HintInfo(broadcast = true)),
Some(HintInfo(broadcast = true))) :: Nil
)
verifyJoinHint(
sql("select /*+ broadcast(b, c)*/ * from a, c, b " +
"where a.a1 = b.b1 and b.b1 = c.c1"),
JoinHint(
None,
Some(HintInfo(broadcast = true))) ::
JoinHint(
None,
Some(HintInfo(broadcast = true))) :: Nil
)
verifyJoinHint(
df1.join(df2, 'a1 === 'b1 && 'a1 > 5).hint("broadcast")
.join(df3, 'b1 === 'c1 && 'a1 < 10),
JoinHint(
Some(HintInfo(broadcast = true)),
None) ::
JoinHint.NONE :: Nil
)
verifyJoinHint(
df1.join(df2, 'a1 === 'b1 && 'a1 > 5).hint("broadcast")
.join(df3, 'b1 === 'c1 && 'a1 < 10)
.join(df, 'b1 === 'id),
JoinHint.NONE ::
JoinHint(
Some(HintInfo(broadcast = true)),
None) ::
JoinHint.NONE :: Nil
)
}
}
}
test("intersect/except") {
val dfSub = spark.range(2)
verifyJoinHint(
df.hint("broadcast").except(dfSub).join(df, "id"),
JoinHint(
Some(HintInfo(broadcast = true)),
None) ::
JoinHint.NONE :: Nil
)
verifyJoinHint(
df.join(df.hint("broadcast").intersect(dfSub), "id"),
JoinHint(
None,
Some(HintInfo(broadcast = true))) ::
JoinHint.NONE :: Nil
)
}
test("hint merge") {
verifyJoinHint(
df.hint("broadcast").filter('id > 2).hint("broadcast").join(df, "id"),
JoinHint(
Some(HintInfo(broadcast = true)),
None) :: Nil
)
verifyJoinHint(
df.join(df.hint("broadcast").limit(2).hint("broadcast"), "id"),
JoinHint(
None,
Some(HintInfo(broadcast = true))) :: Nil
)
}
test("nested hint") {
verifyJoinHint(
df.hint("broadcast").hint("broadcast").filter('id > 2).join(df, "id"),
JoinHint(
Some(HintInfo(broadcast = true)),
None) :: Nil
)
}
test("hints prevent cost-based join reorder") {
withSQLConf(SQLConf.CBO_ENABLED.key -> "true", SQLConf.JOIN_REORDER_ENABLED.key -> "true") {
val join = df.join(df, "id")
val broadcasted = join.hint("broadcast")
verifyJoinHint(
join.join(broadcasted, "id").join(broadcasted, "id"),
JoinHint(
None,
Some(HintInfo(broadcast = true))) ::
JoinHint(
None,
Some(HintInfo(broadcast = true))) ::
JoinHint.NONE :: JoinHint.NONE :: JoinHint.NONE :: Nil
)
}
}
}
|
WindCanDie/spark
|
sql/core/src/test/scala/org/apache/spark/sql/JoinHintSuite.scala
|
Scala
|
apache-2.0
| 6,834 |
package hlt
import java.io.{FileWriter, IOException}
import hlt.Move.{Dock, Noop, Thrust, Undock}
import scala.io.StdIn
object Networking {
private val UNDOCK_KEY = 'u'
private val DOCK_KEY = 'd'
private val THRUST_KEY = 't'
def sendMoves(moves: Iterable[Move]): Unit = {
val moveString = new StringBuilder
for (move <- moves) {
move.moveType match {
case Undock =>
moveString.append(UNDOCK_KEY).append(" ").append(move.ship.id).append(" ")
case Dock =>
moveString
.append(DOCK_KEY)
.append(" ")
.append(move.ship.id)
.append(" ")
.append(move.asInstanceOf[DockMove].planet.id)
.append(" ")
case Thrust =>
moveString
.append(THRUST_KEY)
.append(" ")
.append(move.ship.id)
.append(" ")
.append(move.asInstanceOf[ThrustMove].getThrust)
.append(" ")
.append(move.asInstanceOf[ThrustMove].getAngle)
.append(" ")
case Noop =>
}
}
println(moveString)
}
def readAndSplitLine: Iterator[String] = readLine.trim.split(" ").iterator
private def readLine: String = {
StdIn.readLine()
}
}
class Networking(botName: String) {
var (width, height, myId) = {
val myId = Networking.readLine.toShort
try Log.initialize(new FileWriter(s"${myId}_${botName}.log"))
catch {
case e: IOException =>
e.printStackTrace()
}
val inputStringMapSize = Networking.readAndSplitLine
val width = inputStringMapSize.next.toShort
val height = inputStringMapSize.next.toShort
// Associate bot name
println(botName)
(width, height, myId)
}
def nextGameMap(): GameMap = new GameMap(width, height, myId, Networking.readAndSplitLine)
}
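// Usage sketch (hypothetical bot skeleton; "MyBot" and the empty move list are placeholders): the engine
// handshake happens in the Networking constructor, then each turn reads a fresh GameMap and answers with
// one batch of moves via sendMoves.
//
//   val networking = new Networking("MyBot")
//   while (true) {
//     val gameMap = networking.nextGameMap()
//     val moves: List[Move] = Nil // compute real moves from gameMap here
//     Networking.sendMoves(moves)
//   }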
|
HaliteChallenge/Halite-II
|
airesources/Scala/src/main/scala/hlt/Networking.scala
|
Scala
|
mit
| 1,841 |
/**
* Copyright (C) 2009-2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.scalate.servlet
import org.fusesource.scalate.{Binding, TemplateEngine}
import org.fusesource.scalate.layout.{LayoutStrategy, DefaultLayoutStrategy}
import scala.tools.nsc.Global
import javax.servlet.{ServletException, ServletContext, ServletConfig}
import java.io.File
import org.fusesource.scalate.util._
object ServletTemplateEngine {
val log = Log(getClass); import log._
val templateEngineKey = classOf[ServletTemplateEngine].getName
/**
* Gets the current template engine
*
* @throws IllegalArgumentException if no template engine has been registered with the [[javax.servlet.ServletContext]]
*/
def apply(servletContext: ServletContext): ServletTemplateEngine = {
val answer = servletContext.getAttribute(templateEngineKey)
if (answer == null) {
throw new IllegalArgumentException("No ServletTemplateEngine instance registered on ServletContext for key " +
templateEngineKey + ". Are you sure your web application has registered the Scalate TemplateEngineServlet?")
}
else {
answer.asInstanceOf[ServletTemplateEngine]
}
}
/**
* Updates the current template engine - called on initialisation of the [[org.fusesource.scalate.TemplateEngineServlet]]
*/
def update(servletContext: ServletContext, templateEngine: ServletTemplateEngine) {
servletContext.setAttribute(templateEngineKey, templateEngine)
// now lets fire the bootstrap code
templateEngine.boot
}
/**
* Configures the given TemplateEngine to use the default servlet style layout strategy.
*
* The default layout files searched if no layout attribute is defined by a template are:
* * "WEB-INF/scalate/layouts/default.jade"
* * "WEB-INF/scalate/layouts/default.mustache"
* * "WEB-INF/scalate/layouts/default.scaml"
* * "WEB-INF/scalate/layouts/default.ssp"
*/
def setLayoutStrategy(engine: TemplateEngine): LayoutStrategy = {
engine.layoutStrategy = new DefaultLayoutStrategy(engine, TemplateEngine.templateTypes.map("/WEB-INF/scalate/layouts/default." + _):_*)
engine.layoutStrategy
}
/**
* Returns the source directories to use for the given config
*/
def sourceDirectories(config: Config): List[File] = {
config.getServletContext.getRealPath("/") match {
case path: String => List(new File(path))
case null => List()
}
}
}
/**
* A Servlet based TemplateEngine which initializes itself using a ServletConfig or a FilterConfig.
*
* The default layout files searched if no layout attribute is defined by a template are:
* * "WEB-INF/scalate/layouts/default.jade"
* * "WEB-INF/scalate/layouts/default.mustache"
* * "WEB-INF/scalate/layouts/default.scaml"
* * "WEB-INF/scalate/layouts/default.ssp"
 *
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class ServletTemplateEngine(val config: Config) extends TemplateEngine(ServletTemplateEngine.sourceDirectories(config)) {
import ServletTemplateEngine.log._
templateDirectories ::= "/WEB-INF"
bindings = List(Binding("context", "_root_."+classOf[ServletRenderContext].getName, true, isImplicit = true))
classpath = buildClassPath
classLoader = Thread.currentThread.getContextClassLoader
resourceLoader = new ServletResourceLoader(config.getServletContext)
ServletTemplateEngine.setLayoutStrategy(this)
bootInjections = List(this, config.getServletContext)
Option(config.getInitParameter("boot.class")).foreach(clazz=> bootClassName=clazz)
info("Scalate template engine using working directory: %s", workingDirectory)
private def buildClassPath(): String = {
val builder = new ClassPathBuilder
// Add optional classpath prefix via web.xml parameter
builder.addEntry(config.getInitParameter("compiler.classpath.prefix"))
    // Add the container's class path
builder.addPathFrom(getClass)
.addPathFrom(classOf[ServletConfig])
.addPathFrom(classOf[Product])
try {
builder.addPathFrom(classOf[Global])
} catch {
case x: Throwable => // the scala compiler might not be on the path.
}
// Always include WEB-INF/classes and all the JARs in WEB-INF/lib just in case
builder.addClassesDir(config.getServletContext.getRealPath("/WEB-INF/classes"))
.addLibDir(config.getServletContext.getRealPath("/WEB-INF/lib"))
// Add optional classpath suffix via web.xml parameter
builder.addEntry(config.getInitParameter("compiler.classpath.suffix"))
builder.classPath
}
}
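// Usage sketch (hypothetical servlet or filter code; `myCustomEngine` is a placeholder): once the Scalate
// TemplateEngineServlet has booted and registered the engine, other components can look it up from the
// ServletContext, or re-register their own instance.
//
//   val engine = ServletTemplateEngine(servletContext) // throws IllegalArgumentException if none is registered
//   ServletTemplateEngine(servletContext) = myCustomEngine // sugar for update(servletContext, myCustomEngine)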
|
dnatic09/scalate
|
scalate-core/src/main/scala/org/fusesource/scalate/servlet/ServletTemplateEngine.scala
|
Scala
|
apache-2.0
| 5,269 |
package com.timgroup.matchless
import org.specs2.matcher.{ Matcher, Expectable }
import org.specs2.matcher.MustMatchers._
import scala.collection.GenTraversableOnce
import com.timgroup.matchless.utils.Bag
trait CollectionMatchers {
def haveThePairs[K, V](pairs: (K, V)*) = PairsLikeMatcher(pairs.map(p => (p._1, beEqualTo(p._2))).toMap)
def havePairsLike[K, V](pairs: (K, Matcher[V])*) = PairsLikeMatcher(pairs.toMap)
def haveItemsLike[A](itemMatchers: Matcher[A]*) = ItemsLikeMatcher(itemMatchers.toList)
}
object CollectionMatchers extends CollectionMatchers
case class PairsLikeMatcher[K, V](pairMatchers: Map[K, Matcher[V]]) extends Matcher[Map[K, V]] {
def apply[S <: Map[K, V]](s: Expectable[S]) = {
val sharedKeys = pairMatchers.keySet.intersect(s.value.keySet)
val missingKeys = pairMatchers.keySet.diff(s.value.keySet)
val nonMatchingValues =
pairMatchers.filterKeys(sharedKeys)
.filter { case (key, matcher) => (s.value(key) must matcher).isFailure }
.map { case (key, matcher) => (key, (s.value(key) must matcher).message) }
.toMap
val report = pairMatchers.keySet.map(key =>
if (missingKeys.contains(key)) "* %s: <missing value>".format(key)
else if (nonMatchingValues.contains(key)) "* %s: %s".format(key, nonMatchingValues(key))
else "%s: %s".format(key, s.value(key))).mkString("\\n")
result(missingKeys.isEmpty && nonMatchingValues.isEmpty,
report,
"some of the expected key/value pairs were not present in the collection: \\n\\n" + report,
s)
}
}
trait Orderable[A] { self: Matcher[GenTraversableOnce[A]] =>
def inOrder: Matcher[GenTraversableOnce[A]]
}
trait Lenient[A] { self: Matcher[GenTraversableOnce[A]] =>
val matchers: Iterable[Matcher[A]]
def strictly = new Matcher[GenTraversableOnce[A]] with Orderable[A] {
override def apply[S <: GenTraversableOnce[A]](s: Expectable[S]) = makeStrict(matchers, self).apply(s)
override def inOrder = makeStrict(matchers, ItemsLikeInOrderMatcher(matchers))
def makeStrict(matchers: Iterable[Matcher[A]], innerMatcher: Matcher[GenTraversableOnce[A]]) =
new Matcher[GenTraversableOnce[A]] {
override def apply[S <: GenTraversableOnce[A]](s: Expectable[S]) = {
if (matchers.size !== s.value.size)
result(false, "", "Expected %s items, but found %s".format(matchers.size, s.value.size), s)
else innerMatcher(s)
}
}
}
}
case class ItemsLikeMatcher[A](matchers: Iterable[Matcher[A]]) extends Matcher[GenTraversableOnce[A]]
with Lenient[A] with Orderable[A] {
override def apply[S <: GenTraversableOnce[A]](s: Expectable[S]) = {
if (s.value.size < matchers.size) result(false, "", "Not enough items to find a unique match for every matcher", s)
else {
val matches = (for {
value <- s.value.toList
matcher <- matchers if (value must matcher).isSuccess
} yield matcher -> value).groupBy(_._1).mapValues(_.map(_._2))
if (matches.size < matchers.size) result(false, "", "Some of the expected items were not present in the collection", s)
else {
val search = matches.values.toList.sortBy(_.size).map(Bag(_))
val uniqueMatches = findUnique(search.head, Bag.empty, search.tail)
uniqueMatches match {
case None => result(false, "", "No set of items in the collection uniquely matched the supplied matchers", s)
case Some(matches) => result(true, "The items %s uniquely matched the supplied matchers".format(matches), "", s)
}
}
}
}
private[this] def findUnique(current: Bag[A], claimed: Bag[A], remaining: List[Bag[A]]): Option[Bag[A]] =
remaining match {
case Nil => current.headOption.map(claimed + _)
case l if l.exists(_.isEmpty) => None
case h :: t =>
if (current.isEmpty) None
else {
val x = current.head
val xs = current - x
findUnique(h - x, claimed + x, t.map(_ - x))
.orElse(findUnique(xs, claimed, remaining))
}
}
override def inOrder = ItemsLikeInOrderMatcher(matchers)
}
case class ItemsLikeInOrderMatcher[A](matchers: Iterable[Matcher[A]]) extends Matcher[GenTraversableOnce[A]] with Lenient[A] {
def apply[S <: GenTraversableOnce[A]](s: Expectable[S]) = {
def checkMatchers(matchers: Iterable[Matcher[A]], values: GenTraversableOnce[A]): Boolean =
matchers match {
case Nil => true
case m :: ms => values match {
case Nil => false
case v :: vs => if ((v must m).isSuccess) checkMatchers(ms, vs) else checkMatchers(matchers, vs)
}
}
result(checkMatchers(matchers, s.value),
"The items %s matched the supplied matchers in order".format(s.value),
"The expected items were not present in the collection in the expected order",
s)
}
}
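// Usage sketch (hypothetical specs2 examples; the literal values are placeholders, and CollectionMatchers
// must be imported or mixed in):
//
//   Map("a" -> 1, "b" -> 2) must haveThePairs("a" -> 1, "b" -> 2)
//   Map("a" -> 1, "b" -> 2) must havePairsLike("a" -> beGreaterThan(0))
//   List(1, 2, 3) must haveItemsLike(beEqualTo(1), beGreaterThan(2))
//   List(1, 2, 3) must haveItemsLike(beEqualTo(1), beGreaterThan(2)).inOrder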
|
tim-group/matchless
|
src/main/scala/com/timgroup/matchless/CollectionMatchers.scala
|
Scala
|
mit
| 4,879 |
package ca.hyperreal.scalgorithms
import collection.mutable._
//import math._
import scala.reflect.ClassTag
/**
<code>Matrix</code> is the base class for all matrix types. This abstract class provides a virtually
	complete mathematical matrix implementation. Concrete subclasses need only provide an <code>apply( Int, Int ): R</code> method for
	reading elements of the matrix. Everything else will work based on that; however, subclasses
	are expected to tailor how the results of some of the operations are produced in order to improve
	efficiency.
<p>
The
type parameter <code>R</code> can be any type that either extends <code>Ring[R]</code> or for which there is
	an implicit conversion (view). This class extends <code>Ring[Matrix[R]]</code> to recognize the fact
	that some types of matrices do form a ring. For example, the set of nxn square matrices forms a ring,
	with the set of nxn circulant matrices forming a subring. Many instances of this class cannot be said to
	be ring elements, so the fact that this class extends <code>Ring[Matrix[R]]</code> is merely
	a convenience to allow instances to be treated as such, if it makes sense mathematically to do so.
<p>
	This class is also <code>Iterable</code>. An iterator will return the matrix elements from row 1 to row
<code>rows</code>, and from column 1 to column <code>cols</code>. It is intended that this feature be used in
conjunction with the various "view" methods.
	<p>
@author Edward A. Maxedon
*/
abstract class Matrix[R <: Ring[R]] extends ((Int, Int) => R) with (Matrix[R] => Matrix[R]) with Iterable[R] with Ring[Matrix[R]]
{
/** The number of rows in the matrix */
val rows: Int
/** The number of columns in the matrix */
val cols: Int
if (rows <= 0 || cols <= 0)
sys.error( "Matrix: number of rows and columns must be positive" )
/**
Returns a matrix containing the result of some operation on <tt>this</tt> matrix, where the value
of each element of the resulting matrix is given by a function <tt>f: (Int, Int) => R</tt>, which
assigns that value to the corresponding (row, column) coordinates
*/
def operation( f: (Int, Int) => R ): Matrix[R] = operation( rows, cols, f )
def operation( rows: Int, cols: Int, f: (Int, Int) => R ): Matrix[R] = aspect( rows, cols, f )
def +( that: Matrix[R] ) =
{
if (rows != that.rows || cols != that.cols)
sys.error( "+: matrices cannot be added" )
operation( (i, j) => this(i, j) + that(i, j) )
}
def *( that: Matrix[R] ) =
{
if (cols != that.rows)
sys.error( "*: matrices cannot be multiplied" )
operation( rows, that.cols, (i, j) => rowView(i) dot that.columnView(j) )
}
def apply( that: Matrix[R] ) = this * that
def *:( x: Int ) = operation( x*:this(_, _) )
def *:( x: R ) = operation( x*this(_, _) )
def scale( r: R ) = operation( r*this(_, _) )
def -( that: Matrix[R] ) = this + -that
	def /( that: Matrix[R] ) = this*that.inv
def dot( that: Matrix[R] ) =
{
require( (isRow || isColumn) && (that.isRow || that.isColumn), "dot: expected row or column matrices" )
val l = toList
val r = that.toList
if (l.length != r.length)
sys.error( "dot: matrices must have the same number of elements" )
(l zip r) map (p => p._1*p._2.conj) reduceLeft (_ + _)
}
def unary_- = operation( -this(_, _) )
def conj = operation( this(_, _).conj )
def isUnital = true
def isUnit = isInvertible
def isInvertible = det.isUnit
def transpose = operation( (i, j) => this(j, i) )
def conjTranspose = conj.transpose
def tr = diagonal reduceLeft (_ + _)
def inv = inverse
def inverse: Matrix[R] = det.inv*:adj
def minor( row: Int, col: Int ) = dropView( row, col ).det
def cofactor( row: Int, col: Int ) =
if ((row + col)%2 == 0)
minor( row, col )
else
-minor( row, col )
def cofactors = operation( cofactor )
def adj = cofactors.transpose
def block( row: Int, col: Int, rows: Int, cols: Int ) =
operation( rows, cols, (i, j) => this(row - 1 + i, col - 1 + j) )
def dropFunc( row: Int, col: Int, i: Int, j: Int ) = this( if (i >= row) i + 1 else i, if (j >= col) j + 1 else j )
def drop( row: Int, col: Int ) =
{
if (isRow || isColumn)
sys.error( "drop: can't drop from a row or column matrix" )
operation( rows - 1, cols - 1, dropFunc(row, col, _, _) )
}
def det: R =
{
if (!isSquare)
sys.error( "det: need square matrix" )
rows match
{
case 1 => this( 1, 1 )
case 2 => this( 1, 1 )*this( 2, 2 ) - this( 1, 2 )*this( 2, 1 )
case _ => (for (i <- 1 to rows) yield this( i, 1 )*cofactor( i, 1 )) reduceLeft (_ + _)
}
}
def iterator: Iterator[R] =
{
(for (i <- 1 to rows; j <- 1 to cols) yield this( i, j )).iterator
}
def copyToArray( it: Iterator[R], array: Array[Array[R]] )
{
for (i <- 0 until rows; j <- 0 until cols)
array(i)(j) = it.next
}
def toArray( implicit tag: ClassTag[R] ) =
{
val res = Array.ofDim[R]( rows, cols )
copyToArray( iterator, res )
res
}
protected def swap( array: Array[Array[R]], r1: Int, r2: Int ) // just swap the array elements
{
val t = array(r1)
array(r1) = array(r2)
array(r2) = t
// val rs = array.length
// val cs = array(0).length
//
// for (j <- 0 until cs)
// {
// val t = array(r1)(j)
//
// array(r1)(j) = array(r2)(j)
// array(r2)(j) = t
// }
}
def rref( implicit tag: ClassTag[R] ) =
{
val a = toArray
for (i <- 0 until math.min( rows, cols ))
{
if (a(i)(i).isZero)
{
val nz = for (j <- i+1 until rows; if !a(j)(i).isZero) yield j
if (nz.isEmpty)
sys.error( "can't be put into row echelon form" )
val r = nz.head
swap( a, i, r )
}
val d = a(i)(i)
for (j <- i until cols)
a(i)(j) = a(i)(j)/d
for (j <- 0 until rows; if j != i)
{
val m = a(j)(i)
for (k <- 0 until cols)
a(j)(k) = a(j)(k) - a(i)(k)*m
}
}
new ConcreteMatrix[R]( a )
}
def column( col: Int ) = block( 1, col, rows, 1 )
def row( row: Int ) = block( row, 1, 1, cols )
def diagonal: List[R] =
{
if (!isSquare)
sys.error( "diagonal: need square matrix" )
(for (i <- 1 to rows) yield this( i, i )).toList
}
def concrete( implicit tag: ClassTag[R] ) = Matrix( rows, cols, this(_, _) )
def isRow = rows == 1
def isColumn = cols == 1
def isZero = all( (i, j) => this(i, j).isZero )
def isOne = all( (i, j) => if (i == j) this( i, j ).isOne else this( i, j ).isZero )
def isSquare = rows == cols
def isHermitian = this == conjTranspose
def isSkewHermitian = this == -conjTranspose
def isSymmetric = this == transpose
def isSkewSymmetric = this == -transpose
def isOrthogonal = transpose == inv
def isOrthonormal( implicit conv: Int => R ) =
{
val d = det
isOrthogonal && (d == conv(1) || d == conv(-1))
}
def isUnitary = conjTranspose == inv
def isDiagonal = all( (i, j) => i == j || (i != j && this(i, j).isZero) )
def all( p: (Int, Int) => Boolean ): Boolean =
{
for (i <- 1 to rows; j <- 1 to cols)
if (!p( i, j ))
return false
true
}
def some( p: (Int, Int) => Boolean ): Boolean =
{
for (i <- 1 to rows; j <- 1 to cols)
if (p( i, j ))
return true
false
}
final def aspect( f: (Int, Int) => R ): Matrix[R] = aspect( rows, cols, f )
final def aspect: Matrix[R] = aspect( this )
final def aspect( rows: Int, cols: Int, f: (Int, Int) => R ): Matrix[R] =
{
if (rows < 1 || rows > this.rows || cols < 1 || cols > this.cols)
sys.error( "aspect out of bounds" )
new ViewMatrix[R]( rows, cols, f )
}
final def aspect( row: Int, col: Int, rows: Int, cols: Int ): Matrix[R] =
aspect( rows, cols, (i: Int, j: Int) => this(row - 1 + i, col - 1 + j) )
final def columnView( col: Int ): Matrix[R] = aspect( 1, col, rows, 1 )
final def rowView( row: Int ): Matrix[R] = aspect( row, 1, 1, cols )
final def dropView( row: Int, col: Int ) =
{
if (isRow || isColumn)
sys.error( "drop: can't drop from a row or column matrix" )
aspect( rows - 1, cols - 1, dropFunc(row, col, _, _) )
}
override def hashCode =
{
var res = 0
foreach {res ^= _.hashCode}
res
}
override def equals( a: Any ) =
{
if (!a.isInstanceOf[Matrix[R]])
false
else
all( (i, j) => a.asInstanceOf[Matrix[R]](i, j) == this(i, j) )
}
override def toString =
{
val widths = new Array[Int]( cols )
def format( a: Iterable[R] ) = a.toList.zipWithIndex.map( _ match {case (e, i) => ("%" + widths( i ) + "s").format(e)} )
for (i <- 0 until cols; e <- columnView( i + 1 ))
{
widths( i ) = math.max( widths(i), e.toString.length )
}
if (rows == 1)
rowView( 1 ).mkString( "< ", " ", " >" )
else
{
val buf = new StringBuilder
format( rowView(1) ).addString( buf, "/ ", " ", " \\\\\\n" )
for (i <- 2 to rows - 1)
format( rowView(i) ).addString( buf, "| ", " ", " |\\n" )
format( rowView(rows) ).addString( buf, "\\\\ ", " ", " /" )
buf.toString
}
}
}
object Matrix
{
def apply[R <: Ring[R]]( data: List[R]* )( implicit tag: ClassTag[R] ) =
{
var m: ConcreteMatrix[R] = null
var cols = -1
var i = 0
for (r <- data)
{
if (cols == -1)
{
cols = r.length
m = new ConcreteMatrix[R]( data.length, cols )
}
else if (cols != r.length)
sys.error( "row lists must all be the same length" )
if (cols == 0)
sys.error( "row list cannot be empty" )
var j = 0
for (e <- r)
{
m.array(i)(j) = e
j += 1
}
i += 1
}
m
}
def apply[R <: Ring[R]]( data: List[List[R]] )( implicit tag: ClassTag[R] ): Matrix[R] = Matrix( data: _* )
def apply[R <: Ring[R]]( rows: Int, data: R* )( implicit tag: ClassTag[R] ) = new ConcreteMatrix[R]( rows, data: _* )
def apply( rows: Int, data: Double* ): Matrix[MachineFloat] = Matrix( rows, data.map(MachineFloat(_)): _* )
def apply[R <: Ring[R]]( rows: Int, cols: Int, f: (Int, Int) => R )( implicit tag: ClassTag[R] ) =
{
val m = new ConcreteMatrix[R]( rows, cols )
for (i <- 1 to rows; j <- 1 to cols)
m.array( i - 1 )( j - 1 ) = f( i, j )
m
}
def scalar[R <: Ring[R]]( size: Int, c: R )( implicit conv: Int => R, tag: ClassTag[R] ) = new ScalarMatrix[R]( size, c ).concrete
def identity[R <: Ring[R]]( size: Int )( implicit conv: Int => R, tag: ClassTag[R] ) = scalar[R]( size, 1 )
def diagonal[R <: Ring[R]]( ds: R* )( implicit conv: Int => R, tag: ClassTag[R] ) = new DiagonalMatrix[R]( ds: _* ).concrete
def batch[R <: Ring[R]]( ms: Matrix[R]* )( implicit tag: ClassTag[R] ) = new BatchMatrix[R]( ms: _* ).concrete
def column[R <: Ring[R]]( rs: R* )( implicit tag: ClassTag[R] ) = Matrix[R]( rs.length, rs: _* )
def row[R <: Ring[R]]( rs: R* )( implicit tag: ClassTag[R] ) = Matrix[R]( 1, rs: _* )
def norm( m: Matrix[MachineFloat] ) =
{
require( m.isRow || m.isColumn, "dot: expected row or column matrix" )
math.sqrt( m.toList.map(e => e*e).reduceLeft(_ + _).a )
}
}
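// Usage sketch (hypothetical values): the Double overload of the companion builds a MachineFloat matrix
// in row-major order, and a concrete subclass only needs `rows`, `cols` and `apply( Int, Int )`.
//
//   val m = Matrix( 2, 1.0, 2.0, 3.0, 4.0 )   // 2x2 matrix
//   val d = m.det                             // determinant: -2.0
//   val i = m*m.inv                           // identity, up to rounding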
class ScalarMatrix[R <: Ring[R]]( size: Int, c: R )( implicit conv: Int => R ) extends Matrix[R]
{
val rows = size
val cols = size
private val z = conv( 0 )
def apply( row: Int, col: Int ) = if (row == col) c else z
}
class DiagonalMatrix[R <: Ring[R]]( ds: R* )( implicit conv: Int => R ) extends Matrix[R]
{
val rows = ds.length
val cols = rows
if (ds.length == 0)
sys.error( "DiagonalMatrix: diagonal must have at least one element" )
private val z = conv( 0 )
def apply( row: Int, col: Int ) = if (row == col) ds( row - 1 ) else z
}
class ConcreteMatrix[R <: Ring[R]]( val rows: Int, val cols: Int )( implicit tag: ClassTag[R] ) extends Matrix[R]
{
if (rows < 1 || cols < 1)
sys.error( "matrix dimensions must be positive" )
protected var _det: Option[R] = None
protected var _inv: Option[ConcreteMatrix[R]] = None
private [scalgorithms] val array = Array.ofDim[R]( rows, cols )
def this( rows: Int, data: R* )( implicit tag: ClassTag[R] )
{
this( rows, data.length/rows )
if (data.length % rows != 0)
sys.error( "data length must be a multiple of rows" )
copyToArray( data.iterator, array )
}
def this( a: Array[Array[R]] )( implicit tag: ClassTag[R] )
{
this( a.length, a(0).length )
for (i <- 0 until rows; j <- 0 until cols)
array(i)(j) = a(i)(j)
}
def apply( row: Int, col: Int ) = array( row - 1 )( col - 1 )
override def operation( rows: Int, cols: Int, f: (Int, Int) => R ) = Matrix( rows, cols, f )
override def det =
{
if (_det == None)
_det = Some( super.det )
_det.get
}
override def inverse: Matrix[R] =
{
if (_inv == None)
_inv = Some( super.inverse.concrete )
_inv.get
}
}
class ViewMatrix[R <: Ring[R]]( val rows: Int, val cols: Int, view: (Int, Int) => R ) extends Matrix[R]
{
def apply( row: Int, col: Int ) = view( row, col )
}
class DotProductMatrix[R <: Ring[R]]( ms: Matrix[R]* ) extends ViewMatrix[R]( ms.length, ms.length,
(i, j) => ms(i - 1) dot ms(j - 1) )
//class GramianMatrix[F <: Field[F]]( vs: Vector[F]* ) extends ViewMatrix[F]( vs.length, vs.length,
// (i, j) => vs(i - 1) inner vs(j - 1) )
class BatchMatrix[R <: Ring[R]]( ms: Matrix[R]* ) extends Matrix[R]
{
if (ms.length == 0)
sys.error( "columns: expected at least one row or column matrix" )
private val rows_ = ms.head.rows
private val cols_ = ms.head.cols
for (m <- ms)
{
if (m.isRow && m.isColumn)
sys.error( "BatchMatrix: component matrices must have more than one entry" )
if (!m.isRow && !m.isColumn)
sys.error( "BatchMatrix: expected only row or column matrices" )
if (m.rows != rows_ || m.cols != cols_)
sys.error( "BatchMatrix: all component matrices must have the same size" )
}
val rows =
if (rows_ == 1)
ms.length
else
rows_
val cols =
if (cols_ == 1)
ms.length
else
cols_
private val v =
if (rows_ == 1)
(r: Int, c: Int) => ms(r - 1)( 1, c )
else
(r: Int, c: Int) => ms(c - 1)( r, 1 )
def apply( row: Int, col: Int ) = v( row, col )
}
|
edadma/scalgorithms
|
src/main/scala/Matrix.scala
|
Scala
|
mit
| 13,986 |
package com.arcusys.valamis.storyTree.service.impl
import java.net.URI
import com.arcusys.learn.liferay.LiferayClasses.LUser
import com.arcusys.learn.liferay.services.{CompanyHelper, UserLocalServiceHelper}
import com.arcusys.valamis.exception.EntityNotFoundException
import com.arcusys.valamis.lesson.model.{Lesson, LessonStates}
import com.arcusys.valamis.lesson.service.{LessonService, LessonStatementReader, TeacherLessonGradeService, UserLessonResultService}
import com.arcusys.valamis.lesson.tincan.service.LessonCategoryGoalService
import com.arcusys.valamis.lrs.api.StatementApi
import com.arcusys.valamis.lrssupport.lrs.service.LrsClientManager
import com.arcusys.valamis.lrssupport.lrs.service.util.TinCanVerbs
import com.arcusys.valamis.utils.TincanHelper
import com.arcusys.valamis.lrs.tincan.{Activity, Agent}
import TincanHelper._
import com.arcusys.learn.liferay.util.PortalUtilHelper
import com.arcusys.valamis.persistence.common.{DatabaseLayer, SlickProfile}
import com.arcusys.valamis.storyTree.model._
import com.arcusys.valamis.storyTree.service.StoryTreeStatusService
import com.arcusys.valamis.storyTree.storage.StoryTreeTableComponent
import com.arcusys.valamis.storyTree.storage.query.StoryQueries
import com.arcusys.valamis.util.Joda._
import org.joda.time.DateTime
import scala.concurrent.ExecutionContext.Implicits.global
import slick.driver._
import slick.jdbc._
abstract class StoryTreeStatusServiceImpl(val db: JdbcBackend#DatabaseDef,
val driver: JdbcProfile)
extends StoryTreeStatusService
with StoryTreeTableComponent
with SlickProfile
with StoryQueries
with DatabaseLayer {
def lessonService: LessonService
def lessonCategoryGoalService: LessonCategoryGoalService
def lrsClient: LrsClientManager
def lessonResultService: UserLessonResultService
def teacherGradeService: TeacherLessonGradeService
def lessonStatementReader: LessonStatementReader
import driver.api._
import DatabaseLayer._
override def get(treeId: Long, userId: Long): StoryTreeStatus = execSyncInTransaction {
val companyId = CompanyHelper.getCompanyId
trees.filterById(treeId).result.headOption ifSomeThen { tree =>
for {
ns <- nodes.filterByTreeId(treeId).result
packageItems <- packages.filterByTreeId(treeId).result
} yield {
val user = UserLocalServiceHelper().getUser(userId)
CompanyHelper.setCompanyId(companyId)
lrsClient.statementApi {
getTreeStatus(tree, ns, packageItems)(_, user)
}(CompanyHelper.getCompanyId)
}
} map(_.getOrElse(throw new EntityNotFoundException(s"Story with id $treeId not found")))
}
private def getTreeStatus(tree: Story,
nodes: Seq[StoryNode],
packages: Seq[StoryPackageItem])
(implicit statementReader: StatementApi,
user: LUser): StoryTreeStatus = {
val nodesStatuses = nodes
.filter(_.parentId.isEmpty)
.map(getNodeStatus(_, nodes, packages))
val progress = if (nodesStatuses.isEmpty) 0.0
else nodesStatuses.map(_.progress).sum / nodesStatuses.size
StoryTreeStatus(
tree.id.get,
progress,
maxDate(nodesStatuses.flatMap(_.lastDate)),
tree.title,
tree.description,
nodesStatuses
)
}
private def getNodeStatus(node: StoryNode,
nodes: Seq[StoryNode],
packages: Seq[StoryPackageItem])
(implicit statementReader: StatementApi,
user: LUser): StoryNodeStatus = {
val nodesStatus = nodes
.filter(_.parentId == node.id)
.map(getNodeStatus(_, nodes, packages))
val packagesStatus = packages
.filter(_.nodeId == node.id.get)
.map(getPackageStatus)
val items = nodesStatus.map(_.progress) ++ packagesStatus.map(_.progress)
val progress = if (items.isEmpty) 0.0
else items.sum / items.size
StoryNodeStatus(
node.id.get,
progress,
maxDate(nodesStatus.flatMap(_.lastDate)),
node.title,
node.description,
node.comment,
nodesStatus,
packagesStatus
)
}
private def getPackageStatus(packageItem: StoryPackageItem)
(implicit statementReader: StatementApi,
user: LUser): StoryPackageStatus = {
val lesson = lessonService.getLesson(packageItem.packageId)
val activityId = lesson.map(lessonService.getRootActivityId)
if (lesson.isDefined && activityId.isDefined)
getPackageStatus(packageItem, lesson.get, activityId.get)
else
getEmptyPackageStatus(packageItem)
}
private def getEmptyPackageStatus(packageItem: StoryPackageItem): StoryPackageStatus = {
StoryPackageStatus(
packageItem.id.get,
packageItem.packageId,
progress = 0,
lastDate = None,
title = None,
description = None,
relationComment = None,
topics = Seq()
)
}
private def getPackageStatus(packageItem: StoryPackageItem,
lesson: Lesson,
rootActivityId: String)
(implicit statementReader: StatementApi,
user: LUser): StoryPackageStatus = {
val agent = user.getAgentByUuid
val lessonResult = lessonResultService.get(lesson, user)
val teacherGrade = teacherGradeService.get(user.getUserId, lesson.id).flatMap(_.grade)
val state = lesson.getLessonStatus(lessonResult, teacherGrade)
val categoryGoal = lessonCategoryGoalService.get(lesson.id)
val experienceStatements = lessonStatementReader.getExperienced(agent, rootActivityId)
val lastDate = lessonResult.lastAttemptDate
val topics = categoryGoal.map { goal =>
val categoryGoalStatements = experienceStatements
.filter(s =>
s.context.isDefined &&
s.context.get.contextActivities.isDefined &&
s.context.get.contextActivities.get.category.exists(c => c.id == goal.category)
)
val count = categoryGoalStatements.map(_.obj).distinct.size
val progress = if (count == 0) 0.0
else if (count >= goal.count) 1.0
else count / goal.count
new StoryTopicStatus(
goal.id.get,
progress,
lastDate,
goal.name
)
}
val progress = if (topics.isEmpty) { if (state.contains(LessonStates.Finished)) 1.0 else 0.0 }
else topics.map(_.progress).sum / topics.size
StoryPackageStatus(
packageItem.id.get,
lesson.id,
progress,
lastDate,
Some(lesson.title),
Some(lesson.description),
packageItem.comment,
topics
)
}
private def maxDate(dates: Seq[DateTime]): Option[DateTime] = {
if (dates.isEmpty) None
else Some(dates.max)
}
}
|
arcusys/Valamis
|
valamis-storyTree/src/main/scala/com/arcusys/valamis/storyTree/service/impl/StoryTreeStatusServiceImpl.scala
|
Scala
|
gpl-3.0
| 6,936 |
package com.ubirch.auth.model.db
/**
* author: cvandrei
* since: 2017-03-13
*/
case class OidcProviderEndpoints(authorization: String,
token: String,
jwks: String
)
|
ubirch/ubirch-auth-service
|
model-db/src/main/scala/com/ubirch/auth/model/db/OidcProviderEndpoints.scala
|
Scala
|
apache-2.0
| 270 |
/**
* License
* =======
*
* The MIT License (MIT)
*
*
* Copyright (c) 2017 Antoine DOERAENE @sherpal
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package gui
class AnchorPoint(val point: Point, val relativeTo: Region, val relativePoint: Point,
val xOffset: Double = 0, val yOffset: Double = 0)
object AnchorPoint {
def apply(point: Point, relativeTo: Region, relativePoint: Point,
xOffset: Double = 0, yOffset: Double = 0): AnchorPoint = new AnchorPoint(
point, relativeTo, relativePoint, xOffset, yOffset
)
}
|
sherpal/scalajs-ui
|
src/main/scala/gui/AnchorPoint.scala
|
Scala
|
mit
| 1,631 |
package akka.persistence.hbase.journal
import akka.actor.{ Actor, ActorLogging }
import akka.persistence.hbase.common._
import akka.persistence.hbase.common.Const._
import akka.persistence.journal.AsyncWriteJournal
import akka.persistence.{ PersistenceSettings, PersistentConfirmation, PersistentId, PersistentRepr }
import org.apache.hadoop.hbase.util.Bytes
import scala.collection.immutable
import scala.concurrent._
import java.io.PrintWriter
import akka.event.LoggingAdapter
/**
 * Asynchronous HBase Journal.
*
* Uses AsyncBase to implement asynchronous IPC with HBase.
*/
class HBaseAsyncWriteJournal extends Actor with ActorLogging
with HBaseJournalBase with AsyncWriteJournal
with HBaseAsyncRecovery {
import RowTypeMarkers._
override implicit val logger: LoggingAdapter = log
private lazy val config = context.system.settings.config
implicit override lazy val settings = PluginPersistenceSettings(config, JOURNAL_CONFIG)
lazy val hadoopConfig = HBaseJournalInit.getHBaseConfig(config, JOURNAL_CONFIG)
lazy val client = HBaseClientFactory.getClient(settings, new PersistenceSettings(config.getConfig("akka.persistence")))
val enableExportSequence: Boolean = config.getBoolean("akka.persistence.export-sequence.enable-export")
val exportProcessorId: String = config.getString("akka.persistence.export-sequence.processor-id")
val exportSequenceFile: String = config.getString("akka.persistence.export-sequence.file")
var printerWriter: java.io.PrintWriter = null
val replayGapRetry: Int = config.getInt("akka.persistence.replay-gap-retry")
val skipGap: Boolean = config.getBoolean("akka.persistence.skip-gap")
lazy val publishTestingEvents = settings.publishTestingEvents
implicit override val executionContext = context.system.dispatchers.lookup(settings.pluginDispatcherId)
HBaseJournalInit.createTable(config, Const.JOURNAL_CONFIG)
import Bytes._
import Columns._
import DeferredConversions._
import collection.JavaConverters._
// journal plugin api impl -------------------------------------------------------------------------------------------
override def asyncWriteMessages(persistentBatch: immutable.Seq[PersistentRepr]): Future[Unit] = {
    // log.debug(s"Write async for ${persistentBatch.size} persistent messages")
persistentBatch map { p =>
import p._
// println(RowKey(processorId, sequenceNr).toKeyString)
executePut(
RowKey(processorId, sequenceNr).toBytes,
Array(ProcessorId, SequenceNr, Marker, Message),
Array(toBytes(processorId), toBytes(sequenceNr), toBytes(AcceptedMarker), persistentToBytes(p)),
false // forceFlush to guarantee ordering
)
}
Future(())
}
override def asyncWriteConfirmations(confirmations: immutable.Seq[PersistentConfirmation]): Future[Unit] = {
// log.debug(s"AsyncWriteConfirmations for ${confirmations.size} messages")
val start = System.currentTimeMillis()
val fs = confirmations map { confirm =>
confirmAsync(confirm.processorId, confirm.sequenceNr, confirm.channelId)
}
Future.sequence(fs) map {
case _ =>
val last = System.currentTimeMillis() - start
if (last > 3000) {
logger.info(s""" \n${">" * 15} confirm write response slow with ${last}ms""")
}
flushWrites()
}
}
override def asyncDeleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean): Future[Unit] = {
    // log.debug(s"Async delete [${messageIds.size}] messages, permanent: $permanent")
val doDelete = deleteFunctionFor(permanent)
val deleteFutures = for {
messageId <- messageIds
rowId = RowKey(messageId.processorId, messageId.sequenceNr)
} yield doDelete(rowId.toBytes)
Future.sequence(deleteFutures) map { case _ => flushWrites() }
}
override def asyncDeleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean): Future[Unit] = {
    // log.debug(s"AsyncDeleteMessagesTo for processorId: [$processorId] to sequenceNr: $toSequenceNr, permanent: $permanent")
val doDelete = deleteFunctionFor(permanent)
val scanner = newSaltedScanner(settings.partitionCount, serialization)
scanner.setSaltedStartKeys(processorId, 1)
scanner.setSaltedStopKeys(processorId, RowKey.toSequenceNr(toSequenceNr))
scanner.setMaxNumRows(settings.scanBatchSize)
scanner.setKeyRegexp(processorId)
def handleRows(in: AnyRef): Future[Unit] = in match {
case null =>
// log.debug("AsyncDeleteMessagesTo finished scanning for keys")
flushWrites()
scanner.close()
Future(Array[Byte]())
case rows: AsyncBaseRows =>
val deletes = for {
row <- rows.asScala
col <- row.asScala.headOption // just one entry is enough, because is contains the key
} yield doDelete(col.key)
go() flatMap { _ => Future.sequence(deletes) }
}
def go() = scanner.nextRows() flatMap handleRows
go()
}
// end of journal plugin api impl ------------------------------------------------------------------------------------
def confirmAsync(processorId: String, sequenceNr: Long, channelId: String): Future[Unit] = {
// log.debug(s"Confirming async for processorId: [$processorId], sequenceNr: $sequenceNr and channelId: $channelId")
executePut(
RowKey(processorId, sequenceNr).toBytes,
Array(Marker),
Array(confirmedMarkerBytes(channelId)),
false // not to flush immediately
)
}
private def deleteFunctionFor(permanent: Boolean): (Array[Byte]) => Future[Unit] = {
if (permanent) deleteRow
else markRowAsDeleted
}
override def preStart(): Unit = {
if (enableExportSequence)
printerWriter = new PrintWriter(new java.io.File(exportSequenceFile))
}
override def postStop(): Unit = {
    // The client should be shut down exactly once, here; other users such as HBaseSnapshotter must not
    // shut it down themselves, because it may still be in use at this point.
if (enableExportSequence)
printerWriter.close()
HBaseClientFactory.shutDown()
super.postStop()
}
}
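// Configuration sketch (the values below are hypothetical; the keys are the ones read by this class):
//
//   akka.persistence.replay-gap-retry = 5
//   akka.persistence.skip-gap = false
//   akka.persistence.export-sequence {
//     enable-export = false
//     processor-id = ""
//     file = "/tmp/exported-sequence.txt"
//   }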
|
hossx/akka-persistence-hbase
|
src/main/scala/akka/persistence/hbase/journal/HBaseAsyncWriteJournal.scala
|
Scala
|
apache-2.0
| 6,108 |
package com.github.gdefacci.bdd
package testkit
import com.github.gdefacci.bdd.Feature
class TestInfos(val results: Seq[FeatureRun]) {
lazy val resultEvents: Seq[RunEvent] = results.flatMap { fr =>
fr.scenarioGroups.flatMap { sg =>
sg.scenarios.flatMap {
sc => sc.events
}
}
}
  lazy val (startTime: Long, endTime: Long) = results.flatMap(_.scenarioGroups.flatMap(_.scenarios)).foldLeft(0L -> 0L) { (acc, i) =>
val (mn, mx) = acc
Math.min(mn, i.startTime) -> Math.max(mx, i.endTime)
}
lazy val totalTime: Long = endTime - startTime
lazy val (successfullScenarios, failedScenarios) = results.flatMap(_.scenarioRuns).partition(_.isSuccessfull)
lazy val errors: Seq[ErrorEvent] = resultEvents.collect {
case ev: ErrorEvent => ev
}
lazy val completedSteps: Seq[StepSuccess] = resultEvents.collect {
case ev: StepSuccess => ev
}
lazy val successfullExpectations: Seq[ExpectationSuccess] = resultEvents.collect {
case ev: ExpectationSuccess => ev
}
}
|
gdefacci/bdd
|
testkit/src/main/scala/com/github/gdefacci/bdd/testkit/TestInfos.scala
|
Scala
|
mit
| 1,025 |
package com.tngtech.jgiven.scala.example
import org.scalatest.Assertion
import org.scalatest.matchers.should.Matchers._
class Steps {
var someInt = 0
def some_another_state(): Unit = {
someInt = 5
}
def some_action(): Unit = {
someInt *= 2
}
def some_outcome: Assertion = {
someInt should be(10)
}
}
|
TNG/JGiven
|
example-projects/scala/src/test/scala/com/tngtech/jgiven/scala/example/Steps.scala
|
Scala
|
apache-2.0
| 333 |
package ml.combust.mleap.runtime.transformer.regression
import ml.combust.mleap.core.regression.LinearRegressionModel
import ml.combust.mleap.core.types._
import ml.combust.mleap.runtime.frame.{DefaultLeapFrame, Row}
import ml.combust.mleap.tensor.Tensor
import org.apache.spark.ml.linalg.Vectors
import org.scalatest.FunSpec
/**
* Created by hollinwilkins on 9/15/16.
*/
class LinearRegressionSpec extends FunSpec {
val schema = StructType(Seq(StructField("features", TensorType(BasicType.Double)))).get
val dataset = Seq(Row(Tensor.denseVector(Array(20.0, 10.0, 5.0))))
val frame = DefaultLeapFrame(schema, dataset)
val linearRegression = LinearRegression(shape = NodeShape.regression(),
model = LinearRegressionModel(coefficients = Vectors.dense(Array(1.0, 0.5, 5.0)),
intercept = 73.0))
describe("LinearRegression") {
describe("#transform") {
it("executes the linear regression model and outputs a prediction") {
val frame2 = linearRegression.transform(frame).get
val prediction = frame2.dataset(0).getDouble(1)
assert(prediction == 123.0)
}
describe("with invalid features input") {
it("returns a Failure") {
val frame2 = linearRegression.copy(shape = NodeShape.regression(featuresCol = "bad_features")).transform(frame)
assert(frame2.isFailure)
}
}
}
}
describe("input/output schema") {
it("has the correct inputs and outputs") {
assert(linearRegression.schema.fields ==
Seq(StructField("features", TensorType.Double(3)),
StructField("prediction", ScalarType.Double.nonNullable)))
}
}
}
|
combust-ml/mleap
|
mleap-runtime/src/test/scala/ml/combust/mleap/runtime/transformer/regression/LinearRegressionSpec.scala
|
Scala
|
apache-2.0
| 1,654 |
package edu.berkeley.nlp.coref.bp
import edu.berkeley.nlp.coref.config.CorefSystemConfiguration
import scala.collection.mutable.ArrayBuffer
import edu.berkeley.nlp.futile.util.Logger
import edu.berkeley.nlp.coref._
import scala.util.Random
class DocumentFactorGraph(val docGraph: DocumentGraph,
val featurizer: PairwiseIndexingFeaturizer,
val config: CorefSystemConfiguration,
val gold: Boolean) {
var featsChart = docGraph.featurizeIndexNonPrunedUseCache(featurizer)
val antecedentNodes = new Array[Node[Int]](docGraph.size)
// val latentNodes = new Array[Array[Node[String]]](docGraph.size);
val latentNodes: Array[Array[Node[String]]] = Array.tabulate(docGraph.size)(i => new Array[Node[String]](docGraph.numClusterers))
val latentProjClusterNodes: Array[Array[Node[String]]] = Array.tabulate(docGraph.size)(i => new Array[Node[String]](docGraph.numClusterers))
val allNodes = new ArrayBuffer[Node[_]]()
val allNodesEveryIter = new ArrayBuffer[Node[_]]()
val antecedentUnaryFactors = new Array[UnaryFactorOld](docGraph.size)
val latentUnaryFactors: Array[Array[Factor]] = Array.tabulate(docGraph.size)(i => new Array[Factor](docGraph.numClusterers))
// N.B. we don't know how big the innermost array is so don't mess with it
val latentAgreementFactors: Array[Array[Array[Factor]]] = Array.tabulate(docGraph.size)(i => new Array[Array[Factor]](docGraph.numClusterers))
val latentProjClusterAgreementFactors: Array[Array[AgreementFactor]] = Array.tabulate(docGraph.size)(i => new Array[AgreementFactor](docGraph.numClusterers))
val allFactors = new ArrayBuffer[Factor]()
val allFactorsEveryIter = new ArrayBuffer[Factor]()
// LATENT
// Notes on the clusterFeats options as they relate here: normally the projection
// may reduce the number of clusters (if corefClusters is smaller than the number
// of defined clusters), but sometimes we just want the square projection matrix for
// clusters of different sizes. Therefore, there's also the "preserveclusterdomains"
// option that preserves all cluster domains.
val numClusterers = docGraph.numClusterers
val numLatentClustersVect = (0 until numClusterers).map(docGraph.numClusters(_))
val numCorefClustersVect = if (config.clusterFeats.contains("proj") && !config.clusterFeats.contains("preserveclusterdomains")) {
Array.tabulate(docGraph.numClusterers)(i => config.corefClusters).toSeq
} else {
numLatentClustersVect
}
val latentDomainsVect = numCorefClustersVect.map(numClusters => new Domain((0 until numClusters).map(_ + "").toArray))
// First index is the cluster ID
val latentGrid = for (cid <- 0 until numClusterers) yield {
if (config.clusterFeats.contains("proj") || config.clusterFeats.contains("hard")) {
DocumentFactorGraph.makeFeatureGridEmpty(latentDomainsVect(cid))
} else if (config.clusterFeats.contains("agcustom")) {
DocumentFactorGraph.makeFeatureGridAgreeCustom(latentDomainsVect(cid), DocumentFactorGraph.LatentPairwiseFeat + "C" + cid)
} else {
DocumentFactorGraph.makeFeatureGridHalfParameterized(latentDomainsVect(cid), DocumentFactorGraph.LatentPairwiseFeat + "C" + cid)
}
}
// val latentGridsFine: Array[Array[Array[Array[Seq[String]]]]] = if (config.clusterFeats.contains("fine")) {
// Array.tabulate(MentionType.values().length, MentionType.values().length)((currTypeIdx, prevTypeIdx) => {
// DocumentFactorGraph.makeFeatureGridFineFullyParameterized(latentDomain, DocumentFactorGraph.LatentPairwiseFeat, MentionType.values()(currTypeIdx).toString(), MentionType.values()(prevTypeIdx).toString())
// });
// } else {
// null;
// }
// val latentGridIndexed = latentGrid.map(_.map(_.map(featurizer.getIndex(_, false))));
val latentGridIndexed = latentGrid.map(_.map(_.map(_.map(featurizer.getIndex(_, false)))))
// val latentGridsFineIndexed = if (config.clusterFeats.contains("fine")) {
// latentGridsFine.map(_.map(_.map(_.map(_.map(featurizer.getIndex(_, false))))));
// } else {
// null;
// }
val latentDefaultWeightsGrids = for (cid <- 0 until numClusterers) yield {
if (config.clusterFeats.contains("proj") || config.clusterFeats.contains("hard")) {
DocumentFactorGraph.makeForcedAgreementWeightsGrid(latentDomainsVect(cid).size, latentDomainsVect(cid).size)
} else {
DocumentFactorGraph.makeZeroWeightsGrid(latentDomainsVect(cid).size, latentDomainsVect(cid).size)
}
}
val latentProjClusterDomainsVect = numLatentClustersVect.map(numClusters => new Domain((0 until numClusters).map(_ + "").toArray))
val latentProjClusterGrid = for (cid <- 0 until docGraph.numClusterers) yield {
if (config.clusterFeats.contains("projagree")) {
DocumentFactorGraph.makeFeatureGridAgreeCustomOnSource(latentProjClusterDomainsVect(cid), DocumentFactorGraph.LatentProjFeat + "C" + cid)
} else if (config.clusterFeats.contains("proj")) {
DocumentFactorGraph.makeFeatureGridFullyParameterized(latentProjClusterDomainsVect(cid), latentDomainsVect(cid), DocumentFactorGraph.LatentProjFeat + "C" + cid)
} else {
DocumentFactorGraph.makeFeatureGridEmpty(latentProjClusterDomainsVect(cid))
}
}
val latentProjClusterGridIndexed = latentProjClusterGrid.map(_.map(_.map(_.map(featurizer.getIndex(_, false)))))
val latentProjClusterDefaultWeightsGrids = for (cid <- 0 until numClusterers) yield {
if (config.clusterFeats.contains("proj")) {
if (config.projDefaultWeights == "agreeheavy") {
DocumentFactorGraph.makeAgreementWeightsGrid(latentProjClusterDomainsVect(cid).size, latentDomainsVect(cid).size, 1.0)
} else if (config.projDefaultWeights == "agreelight") {
DocumentFactorGraph.makeAgreementWeightsGrid(latentProjClusterDomainsVect(cid).size, latentDomainsVect(cid).size, 0.01)
} else {
DocumentFactorGraph.makeEpsilonWeightsGrid(latentProjClusterDomainsVect(cid).size, latentDomainsVect(cid).size)
}
} else {
DocumentFactorGraph.makeForcedAgreementWeightsGrid(latentProjClusterDomainsVect(cid).size, latentDomainsVect(cid).size)
}
}
for (i <- 0 until docGraph.size()) {
val domainArr = docGraph.getPrunedDomain(i, gold)
// NODES
antecedentNodes(i) = new Node[Int](new Domain(domainArr))
if (config.clusterFeats.contains("latent")) {
for (cid <- 0 until docGraph.numClusterers) {
latentNodes(i)(cid) = new Node[String](latentDomainsVect(cid))
latentProjClusterNodes(i)(cid) = new Node[String](latentProjClusterDomainsVect(cid))
}
}
allNodes += antecedentNodes(i)
allNodesEveryIter += antecedentNodes(i)
if (config.clusterFeats.contains("latent")) {
// allNodes ++= latentNodes(i);
for (cid <- 0 until docGraph.numClusterers) {
allNodes += latentProjClusterNodes(i)(cid)
allNodes += latentNodes(i)(cid)
allNodesEveryIter += latentNodes(i)(cid)
}
}
// UNARY FACTORS
antecedentUnaryFactors(i) = new UnaryFactorOld(antecedentNodes(i))
allFactors += antecedentUnaryFactors(i)
if (config.clusterFeats.contains("latent")) {
for (cid <- 0 until docGraph.numClusterers) {
val currLatentFactor = new UnaryFactorOld(latentProjClusterNodes(i)(cid))
currLatentFactor.setUnaryFactor(docGraph.getClusterPosteriors(cid, i))
latentUnaryFactors(i)(cid) = currLatentFactor
allFactors += latentUnaryFactors(i)(cid)
}
}
if (config.clusterFeats.contains("latent")) {
for (cid <- 0 until docGraph.numClusterers) {
latentAgreementFactors(i)(cid) = new Array[Factor](i+1)
if (config.clusterFeats.contains("projfine")) {
throw new RuntimeException("Fine features no longer supported")
// val typeIndex = docGraph.getMention(i).mentionType.ordinal();
// latentProjClusterAgreementFactors(i)(cid) = new AgreementFactor(latentProjClusterNodes(i)(cid), latentNodes(i)(cid), latentProjClusterGridsFine(typeIndex)(cid), latentProjClusterGridsFineIndexed(typeIndex)(cid), latentProjClusterDefaultWeightsGrid);
} else {
latentProjClusterAgreementFactors(i)(cid) = new AgreementFactor(latentProjClusterNodes(i)(cid), latentNodes(i)(cid), latentProjClusterGrid(cid), latentProjClusterGridIndexed(cid), latentProjClusterDefaultWeightsGrids(cid))
}
allFactors += latentProjClusterAgreementFactors(i)(cid)
}
}
for (j <- domainArr) {
// Don't build a factor for a guy pointing to itself
if (j != i) {
if (config.clusterFeats.contains("latent")) {
for (cid <- 0 until docGraph.numClusterers) {
// if (config.clusterFeats.contains("fine")) {
// // Can't do projection, we require that the default weights grid is zero
// require(!config.clusterFeats.contains("proj"));
// val currMentTypeIdx = docGraph.getMention(i).mentionType.ordinal();
// val antMentTypeIdx = docGraph.getMention(j).mentionType.ordinal();
// latentAgreementFactors(i)(cid)(j) = new PropertyFactor(j, latentNodes(i)(cid), antecedentNodes(i), latentNodes(j)(cid), latentGridsFine(currMentTypeIdx)(antMentTypeIdx), latentGridsFineIndexed(currMentTypeIdx)(antMentTypeIdx), latentDefaultWeightsGrid);
// } else {
if (config.clusterFeats.contains("proj")) {
latentAgreementFactors(i)(cid)(j) = new HardPropertyFactor(j, latentNodes(i)(cid), antecedentNodes(i), latentNodes(j)(cid))
} else {
latentAgreementFactors(i)(cid)(j) = new PropertyFactor(j, latentNodes(i)(cid), antecedentNodes(i), latentNodes(j)(cid), latentGrid(cid), latentGridIndexed(cid), latentDefaultWeightsGrids(cid))
}
// }
allFactors += latentAgreementFactors(i)(cid)(j)
allFactorsEveryIter += latentAgreementFactors(i)(cid)(j)
}
}
}
}
}
val allFeatures = allFactors.flatMap(_.getAllAssociatedFeatures()).distinct
// Initialize received messages at nodes
allNodes.foreach(_.initializeReceivedMessagesUniform())
var nodeMillis = 0L
var factorMillis = 0L
Logger.logss("Document factor graph instantiated: " + docGraph.size + " mentions, " + allNodes.size + " nodes (" + allNodesEveryIter.size + " every iter), " +
allFactors.size + " factors (" + allFactorsEveryIter.size + " every iter), " + allFeatures.size + " features, <=30 of which are: " +
allFeatures.slice(0, Math.min(30, allFeatures.size)))
def setWeights(pairwiseScorer: PairwiseScorer, lossFcn: (CorefDoc, Int, Int) => Double) {
// These scores already have -Infinity whenever something has been pruned
val scoresChart = docGraph.featurizeIndexAndScoreNonPrunedUseCache(pairwiseScorer)._2
// Modify the scores to incorporate softmax-margin and whether or not we're doing gold
val antecedents = docGraph.getGoldAntecedentsUnderCurrentPruning()
for (i <- 0 until scoresChart.size) {
for (j <- 0 until scoresChart(i).size) {
if (!docGraph.isPruned(i, j)) {
if (gold) {
// For gold, need to restrict to those in the set of antecedents
if (!antecedents(i).contains(j)) {
scoresChart(i)(j) = Double.NegativeInfinity
}
} else {
// For guess, need to loss-augment
scoresChart(i)(j) += lossFcn(docGraph.corefDoc, i, j)
}
}
}
val antecedentUnaryPotential = scoresChart(i).filter((value) => !value.isNegInfinity).map(Math.exp(_))
if (antecedentUnaryPotential.reduce(_ + _) == 0) {
Logger.logss("Scores chart: " + scoresChart(i).toSeq)
Logger.logss("Ant unary pot: " + i + ": " + antecedentUnaryPotential.toSeq)
require(false)
}
antecedentUnaryFactors(i).setUnaryFactor(antecedentUnaryPotential)
}
// Update weights of the factors
for (factor <- allFactors) {
factor.setWeights(pairwiseScorer.weights)
}
// Scrub values of potentials. Can't just reset all to zero because they're
// still linked to the received messages from the previous iteration, so the
// arrays themselves need to be reinitialized.
// allNodes.map(_.resetReceivedMessages());
allNodes.foreach(_.initializeReceivedMessagesUniform())
// Send initial messages from unary factors; these don't rely
// on having received messages
antecedentUnaryFactors.foreach(_.sendMessages())
if (config.clusterFeats.contains("latent")) {
latentUnaryFactors.foreach(_.foreach(_.sendMessages()))
}
}
def passMessagesOneRound(firstOrLastIter: Boolean) {
// Nodes and factors are ordered by position in the graph so later guys get better information from earlier ones
val time1 = System.nanoTime()
for (node <- if (firstOrLastIter) allNodes else allNodesEveryIter) {
node.sendMessages()
}
val time2 = System.nanoTime()
nodeMillis += (time2 - time1) / 1000000
for (factor <- if (firstOrLastIter) allFactors else allFactorsEveryIter) {
factor.sendMessages()
}
factorMillis += (System.nanoTime() - time2) / 1000000
}
def getDenseAntecedentNodeMarginals(idx: Int): Array[Double] = {
val marginals = Array.fill(idx+1)(0.0)
val sparseMarginals = antecedentNodes(idx).getMarginals()
for (j <- 0 until sparseMarginals.size) {
marginals(antecedentNodes(idx).domain.entries(j)) = sparseMarginals(j)
}
marginals
}
def addExpectedFeatureCountsToGradient(scale: Double, gradient: Array[Double]) {
val time = System.nanoTime()
// Add pairwise features with custom machinery
// TODO: These can be incorporated into the unary factor
for (i <- 0 until docGraph.size) {
val currNodeMarginals = getDenseAntecedentNodeMarginals(i)
for (j <- 0 until currNodeMarginals.size) {
require(currNodeMarginals(j) >= 0 && currNodeMarginals(j) <= 1)
addToGradient(featsChart(i)(j), scale * currNodeMarginals(j), gradient)
}
}
for (factor <- allFactors) {
factor.addExpectedFeatureCounts(scale, gradient)
}
// Logger.logss("Marginals time: " + (System.nanoTime() - time) / 1000000 + " millis");
}
private def addToGradient(feats: Seq[Int], scale: Double, gradient: Array[Double]) {
require(!scale.isNaN() && !scale.isInfinite())
var i = 0
while (i < feats.size) {
val feat = feats(i)
gradient(feat) += 1.0 * scale
i += 1
}
}
// def computeUncertaintyStatistics(): Seq[Int] = {
// docGraph.computeUncertaintyStatistics((idx) => antecedentNodes(idx).getMarginals());
// }
//
// def computeUnkStatistics(): Seq[Int] = {
// docGraph.computeUnkStatistics();
// }
def renderLatentInfo(): String = {
""
// var latentInfo = "";
// for (i <- 0 until docGraph.size) {
// if (antecedentNodes(i).domain.size > 1) {
// // Only do things based on the first clusterer
// latentInfo += i + " original: " + GUtil.fmt(docGraph.getClusterPosteriors(0, i)) + "\\n"
// latentInfo += i + " backptrs: " + antecedentNodes(i).domain.entries.toSeq + ": " + GUtil.fmt(antecedentNodes(i).getMarginals()) + "\\n"
// latentInfo += i + " final: " + GUtil.fmt(latentNodes(i)(0).getMarginals()) + "\\n";
// } else if (antecedentNodes(i).domain.entries(0) == i) {
// latentInfo += i + " TRIVIAL original: " + GUtil.fmt(docGraph.getClusterPosteriors(0, i)) + "\\n"
// latentInfo += i + " TRIVIAL final: " + GUtil.fmt(latentNodes(i)(0).getMarginals()) + "\\n";
// }
// }
// latentInfo;
}
}
object DocumentFactorGraph {
val nerTypes = Seq("CARDINAL", "DATE", "EVENT", "FAC", "GPE", "LANGUAGE", "LAW", "LOC", "MONEY",
"ORDINAL", "ORG", "PERSON", "PRODUCT", "QUANTITY", "TIME", "WORK_OF_ART")
val nerTypesIncludingO = nerTypes ++ Seq("O")
val nertDomain = new Domain(nerTypes.toArray)
// Don't include "O", assume that it represents unknown
val LatentPairwiseFeat = "LatentPairwise"
val LatentProjFeat = "LatentProj"
def makeUnaryFeaturesAndDefaultWeights[A](domain: Domain[A], prediction: String, agreeFeat: String, disagreeFeat: String, config: CorefSystemConfiguration): (Array[Seq[String]], Array[Double]) = {
val features = new Array[Seq[String]](domain.size)
if (config.clusterFeats.contains("origcustom")) {
for (i <- 0 until domain.size) {
features(i) = Seq("ORIGCOMPATIBLE:" + domain.entries(i) + "-p=" + prediction)
}
} else { // if (config.clusterFeats.contains("origagree")) {
for (i <- 0 until domain.size) {
features(i) = if (domain.entries(i).toString() == prediction) Seq(agreeFeat) else Seq(disagreeFeat)
}
}
val weights = new Array[Double](domain.size)
for (i <- 0 until domain.size) {
weights(i) = if (config.clusterFeats.contains("origcustom")) {
if (domain.entries(i).toString() == prediction) 1.0 else 0.0
} else {
0.0
}
}
(features, weights)
}
def makeFeatureGrid[A](domain: Domain[A], overallName: String, agreeFeat: String, disagreeFeat: String, custom: Boolean): Array[Array[Seq[String]]] = {
if (custom) {
val featureGrid = new Array[Array[Seq[String]]](domain.size)
for (i <- 0 until domain.size) {
featureGrid(i) = new Array[Seq[String]](domain.size)
for (j <- 0 until domain.size) {
featureGrid(i)(j) = Seq("TRANSCOMPATIBLE-" + overallName + ":" + domain.entries(i) + "-" + domain.entries(j))
}
}
featureGrid
} else {
val featureGrid = new Array[Array[Seq[String]]](domain.size)
for (i <- 0 until domain.size) {
featureGrid(i) = new Array[Seq[String]](domain.size)
for (j <- 0 until domain.size) {
featureGrid(i)(j) = if (i == j) Seq(agreeFeat) else Seq(disagreeFeat)
}
}
featureGrid
}
}
def makeFeatureGridAgreeCustom[A](domain: Domain[A], featPrefix: String): Array[Array[Seq[String]]] = {
val featureGrid = new Array[Array[Seq[String]]](domain.size)
for (i <- 0 until domain.size) {
featureGrid(i) = new Array[Seq[String]](domain.size)
for (j <- 0 until domain.size) {
featureGrid(i)(j) = Seq(if (i == j) featPrefix + "Agree-" + i else featPrefix + "Disagree")
}
}
featureGrid
}
def makeFeatureGridAgreeCustomOnSource[A](domain: Domain[A], featPrefix: String): Array[Array[Seq[String]]] = {
val featureGrid = new Array[Array[Seq[String]]](domain.size)
for (i <- 0 until domain.size) {
featureGrid(i) = new Array[Seq[String]](domain.size)
for (j <- 0 until domain.size) {
featureGrid(i)(j) = Seq(if (i == j) featPrefix + "Agree-" + i else featPrefix + "Disagree-" + i)
}
}
featureGrid
}
def makeFeatureGridHalfParameterized[A](domain: Domain[A], featPrefix: String): Array[Array[Seq[String]]] = {
val featureGrid = new Array[Array[Seq[String]]](domain.size)
for (i <- 0 until domain.size) {
featureGrid(i) = new Array[Seq[String]](domain.size)
for (j <- 0 until domain.size) {
featureGrid(i)(j) = Seq(featPrefix + "-" + (if (i <= j) i + "-" + j else j + "-" + i))
}
}
featureGrid
}
def makeFeatureGridFullyParameterized[A](domainOne: Domain[A], domainTwo: Domain[A], featPrefix: String): Array[Array[Seq[String]]] = {
val featureGrid = new Array[Array[Seq[String]]](domainOne.size)
for (i <- 0 until domainOne.size) {
featureGrid(i) = new Array[Seq[String]](domainTwo.size)
for (j <- 0 until domainTwo.size) {
featureGrid(i)(j) = Seq(featPrefix + "-" + i + "-" + j)
}
}
featureGrid
}
def makeFeatureGridFineFullyParameterized[A](domain: Domain[A], featPrefix: String, conjCurr: String, conjPrev: String): Array[Array[Seq[String]]] = {
val featureGrid = new Array[Array[Seq[String]]](domain.size)
for (i <- 0 until domain.size) {
featureGrid(i) = new Array[Seq[String]](domain.size)
for (j <- 0 until domain.size) {
featureGrid(i)(j) = Seq(featPrefix + "-" + i + "-" + j, featPrefix + "-" + i + "-" + j + "&" + conjCurr, featPrefix + "-" + i + "-" + j + "&" + conjCurr + "&" + conjPrev)
}
}
featureGrid
}
def makeFeatureGridEmpty[A](domain: Domain[A]): Array[Array[Seq[String]]] = {
val featureGrid = new Array[Array[Seq[String]]](domain.size)
for (i <- 0 until domain.size) {
featureGrid(i) = new Array[Seq[String]](domain.size)
for (j <- 0 until domain.size) {
featureGrid(i)(j) = Seq[String]()
}
}
featureGrid
}
// def makeFeatureGridFullyParameterizedConjunctions[A](domain: Domain[A], agreeFeat: String, currMentType: MentionType, antMentType: MentionType): Array[Array[Seq[String]]] = {
// val featureGrid = new Array[Array[Seq[String]]](domain.size);
// for (i <- 0 until domain.size) {
// featureGrid(i) = new Array[Seq[String]](domain.size);
// for (j <- 0 until domain.size) {
// featureGrid(i)(j) = Seq(agreeFeat + (if (i <= j) i + "-" + j else j + "-" + i));
// featureGrid(i)(j) = Seq(agreeFeat + i + "-" + j,
// agreeFeat + i + "-" + j + "-Curr=" + currMentType.toString,
// agreeFeat + i + "-" + j + "-Curr=" + currMentType.toString + "-Prev=" + antMentType.toString);
// }
// }
// featureGrid;
// }
def makeZeroWeightsGrid(dim1: Int, dim2: Int): Array[Array[Double]] = {
Array.tabulate(dim1, dim2)((i, j) => 0.0)
}
def makeForcedAgreementWeightsGrid(dim1: Int, dim2: Int): Array[Array[Double]] = {
require(dim1 == dim2, "Can only force agreement on square matrix")
Array.tabulate(dim1, dim2)((i, j) => if (i == j) 0.0 else Double.NegativeInfinity)
}
def makeAgreementWeightsGrid(dim1: Int, dim2: Int, agreementWeight: Double): Array[Array[Double]] = {
require(dim1 == dim2, "Can only have agreement on square matrix")
Array.tabulate(dim1, dim2)((i, j) => if (i == j) agreementWeight else 0.0)
}
def makeEpsilonWeightsGrid(dim1: Int, dim2: Int): Array[Array[Double]] = {
val rand = new Random(0)
Array.tabulate(dim1, dim2)((i, j) => (rand.nextDouble() - 0.5) * 0.01)
}
}
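// Illustrative sketch (not part of the original file): concrete values produced by the
// weight-grid helpers above for tiny dimensions, read directly off their Array.tabulate
// definitions. The object name is hypothetical.
object WeightGridExamples {
  // Diagonal entries get the agreement weight, everything else stays 0.0:
  // Array(Array(0.5, 0.0), Array(0.0, 0.5))
  val agree = DocumentFactorGraph.makeAgreementWeightsGrid(2, 2, 0.5)
  // Off-diagonal entries are -Infinity, so disagreeing assignments are forbidden:
  // Array(Array(0.0, -Infinity), Array(-Infinity, 0.0))
  val forced = DocumentFactorGraph.makeForcedAgreementWeightsGrid(2, 2)
}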
|
timfeu/berkeleycoref-thesaurus
|
src/main/java/edu/berkeley/nlp/coref/bp/DocumentFactorGraph.scala
|
Scala
|
gpl-3.0
| 22,432 |
package gapt
package object utils {
def unorderedPairsOf[T]( elements: Iterable[T] ): Iterable[( T, T )] = {
val elementsWithIndex = elements.zipWithIndex
for {
( e1, i1 ) <- elementsWithIndex
( e2, i2 ) <- elementsWithIndex
if i1 < i2
} yield ( e1, e2 )
}
def crossProduct[T]( xs: Seq[Iterable[T]] ): Iterable[Seq[T]] =
xs match {
case Seq() => Seq( Seq() )
case Seq( x, xss @ _* ) => for { y <- x; ys <- crossProduct( xss ) } yield y +: ys
}
}
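// Illustrative usage sketch (not part of the original file), showing what the two helpers
// above yield on small inputs. The object name is hypothetical.
object UtilsUsageSketch {
  // Yields the pairs (1, 2), (1, 3), (2, 3): each unordered pair exactly once.
  val pairs = utils.unorderedPairsOf( Seq( 1, 2, 3 ) )
  // Yields Seq(1, 3), Seq(1, 4), Seq(2, 3), Seq(2, 4): one element from each input sequence.
  val combos = utils.crossProduct( Seq( Seq( 1, 2 ), Seq( 3, 4 ) ) )
}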
|
gapt/gapt
|
core/src/main/scala/gapt/utils/package.scala
|
Scala
|
gpl-3.0
| 518 |
package com.autodesk.tct.utilities
import java.io._
/**
 * Helper object that reads a dist file
*/
object DistProvider {
/**
* Reads the dist file. Note: this function does not automatically close the InputStream
*
* @param pathFileName the dist file path
* @return the InputStream and its size.
*/
def read(pathFileName: String): (InputStream, Long) = {
val file = new File(pathFileName)
val inputStream = new FileInputStream(file)
(inputStream, file.length())
}
}
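/**
 * Illustrative usage sketch (not part of the original file): read() hands back an open
 * InputStream, so the caller must close it. The path below is a placeholder.
 */
object DistProviderUsageSketch {
  def printSize(): Unit = {
    val (stream, size) = DistProvider.read("/tmp/example.dist") // placeholder path
    try {
      println(s"dist file is $size bytes")
    } finally {
      stream.close() // caller is responsible for closing, as noted in read()'s doc
    }
  }
}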
|
adsk-cp-tct/challenger-backend
|
app/com/autodesk/tct/utilities/DistProvider.scala
|
Scala
|
gpl-3.0
| 500 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.charts.stats.buffers
import scala.collection.mutable
import io.gatling.charts.stats.{ GroupRecord, RequestRecord }
import io.gatling.commons.shared.unstable.model.stats.Group
import io.gatling.commons.stats.{ KO, Status }
import io.gatling.core.config.GatlingConfiguration
private[stats] trait ResponseTimeRangeBuffers {
val responseTimeRangeBuffers: mutable.Map[BufferKey, ResponseTimeRangeBuffer] = mutable.Map.empty
def getResponseTimeRangeBuffers(requestName: Option[String], group: Option[Group])(implicit configuration: GatlingConfiguration): ResponseTimeRangeBuffer =
responseTimeRangeBuffers.getOrElseUpdate(BufferKey(requestName, group, None), new ResponseTimeRangeBuffer)
def updateResponseTimeRangeBuffer(record: RequestRecord)(implicit configuration: GatlingConfiguration): Unit = {
import record._
getResponseTimeRangeBuffers(Some(name), group).update(responseTime, status)
getResponseTimeRangeBuffers(None, None).update(responseTime, status)
}
def updateGroupResponseTimeRangeBuffer(record: GroupRecord)(implicit configuration: GatlingConfiguration): Unit =
getResponseTimeRangeBuffers(None, Some(record.group)).update(record.duration, record.status)
class ResponseTimeRangeBuffer(implicit configuration: GatlingConfiguration) {
var low: Int = 0
var middle: Int = 0
var high: Int = 0
var ko: Int = 0
def update(time: Int, status: Status): Unit = {
if (status == KO) ko += 1
else if (time < configuration.charting.indicators.lowerBound) low += 1
else if (time > configuration.charting.indicators.higherBound) high += 1
else middle += 1
}
}
}
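// Illustrative sketch (not part of Gatling): restates the bucketing done by update() above,
// with hypothetical lower/higher bounds standing in for configuration.charting.indicators.
private[buffers] object ResponseTimeRangeBucketingSketch {
  def bucket(responseTime: Int, failed: Boolean, lowerBound: Int = 800, higherBound: Int = 1200): String =
    if (failed) "ko"
    else if (responseTime < lowerBound) "low"
    else if (responseTime > higherBound) "high"
    else "middle"
  // e.g. bucket(300, failed = false) == "low", bucket(1000, failed = false) == "middle"
}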
|
gatling/gatling
|
gatling-charts/src/main/scala/io/gatling/charts/stats/buffers/ResponseTimeRangeBuffers.scala
|
Scala
|
apache-2.0
| 2,286 |
package tastytest
object TestProd extends Suite("TestProd") {
val Some(one) = Prod(1)
test("product * empty === empty") {
val Some(product) = Prod(getRandomPos)
assert(product * Prod.empty === Prod.empty)
}
test("product * one === product") {
val Some(product) = Prod(getRandomPos)
assert(product * one === product)
}
test("product mul empty === empty") {
val Some(product) = Prod(getRandomPos)
assert((product `mul` Prod.empty) === Prod.empty)
}
test("product mul one === product") {
val Some(product) = Prod(getRandomPos)
assert((product `mul` one) === product)
}
override val reps = 1_000_000
}
|
scala/scala
|
test/tasty/run/src-2/tastytest/TestProd.scala
|
Scala
|
apache-2.0
| 657 |
/*
* Copyright 2015-2016 IBM Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.database
import scala.concurrent.Future
import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpMethods
import akka.http.scaladsl.model.StatusCode
import spray.json._
/**
* This class only handles the basic communication to the proper endpoints
* ("JSON in, JSON out"). It is up to its clients to interpret the results.
*/
class CloudantRestClient(host: String, port: Int, username: String, password: String, db: String)(implicit system: ActorSystem)
extends CouchDbRestClient("https", host, port, username, password, db) {
// https://cloudant.com/blog/cloudant-query-grows-up-to-handle-ad-hoc-queries/#.VvllCD-0z2C
def simpleQuery(doc: JsObject): Future[Either[StatusCode, JsObject]] = {
requestJson(mkJsonRequest(HttpMethods.POST, uri(db, "_find"), doc))
}
}
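/**
 * Illustrative usage sketch (not part of the original file). The host, credentials, database
 * and selector below are placeholders; simpleQuery posts the selector document to Cloudant's
 * _find endpoint and yields either an error status code or the JSON response.
 */
object CloudantRestClientUsageSketch {
  def run(): Unit = {
    implicit val system: ActorSystem = ActorSystem("cloudant-example") // placeholder actor system
    import system.dispatcher
    val client = new CloudantRestClient("account.cloudant.com", 443, "user", "password", "mydb")
    val selector = JsObject("selector" -> JsObject("type" -> JsString("user")))
    client.simpleQuery(selector).foreach(result => println(result))
  }
}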
|
nwspeete-ibm/openwhisk
|
common/scala/src/main/scala/whisk/core/database/CloudantRestClient.scala
|
Scala
|
apache-2.0
| 1,424 |
package us.bleibinha.scalaredisexample
import org.scalatest.FunSpec
class HelloWorldSpec extends FunSpec {
describe("Adding 1 to 1") {
it("should equals 2"){
assert(1+1 == 2)
}
}
}
|
ExNexu/scala-redis-example
|
src/test/scala/us/bleibinha/scalaredisexample/HelloWorldSpec.scala
|
Scala
|
bsd-3-clause
| 202 |
package ch.ninecode.model
import com.esotericsoftware.kryo.Kryo
import com.esotericsoftware.kryo.Serializer
import com.esotericsoftware.kryo.io.Input
import com.esotericsoftware.kryo.io.Output
import org.apache.spark.sql.Row
import ch.ninecode.cim.CIMClassInfo
import ch.ninecode.cim.CIMContext
import ch.ninecode.cim.CIMParseable
import ch.ninecode.cim.CIMRelationship
import ch.ninecode.cim.CIMSerializer
/**
* Aggregate loads are used to represent all or part of the real and reactive load from one or more loads in the static (power flow) data.
*
* This load is usually the aggregation of many individual load devices and the load model is an approximate representation of the aggregate response of the load devices to system disturbances.
* Standard aggregate load model comprised of static and/or dynamic components. A static load model represents the sensitivity of the real and reactive power consumed by the load to the amplitude and frequency of the bus voltage. A dynamic load model can be used to represent the aggregate response of the motor components of the load.
*
* @param LoadDynamics [[ch.ninecode.model.LoadDynamics LoadDynamics]] Reference to the superclass object.
* @param LoadMotor [[ch.ninecode.model.LoadMotor LoadMotor]] Aggregate motor (dynamic) load associated with this aggregate load.
* @param LoadStatic [[ch.ninecode.model.LoadStatic LoadStatic]] Aggregate static load associated with this aggregate load.
* @group LoadDynamics
* @groupname LoadDynamics Package LoadDynamics
* @groupdesc LoadDynamics Dynamic load models are used to represent the dynamic real and reactive load behaviour of a load from the static power flow model.
* Dynamic load models can be defined as applying either to a single load (energy consumer) or to a group of energy consumers.
* Large industrial motors or groups of similar motors can be represented by a synchronous machine model (SynchronousMachineDynamics) or an asynchronous machine model (AsynchronousMachineDynamics), which are usually represented as generators with negative active power output in the static (power flow) data.
*/
final case class LoadAggregate
(
LoadDynamics: LoadDynamics = null,
LoadMotor: String = null,
LoadStatic: String = null
)
extends
Element
{
/**
* Return the superclass object.
*
* @return The typed superclass nested object.
* @group Hierarchy
* @groupname Hierarchy Class Hierarchy Related
* @groupdesc Hierarchy Members related to the nested hierarchy of CIM classes.
*/
override def sup: LoadDynamics = LoadDynamics
//
// Row overrides
//
/**
* Return a copy of this object as a Row.
*
* Creates a clone of this object for use in Row manipulations.
*
* @return The copy of the object.
* @group Row
* @groupname Row SQL Row Implementation
* @groupdesc Row Members related to implementing the SQL Row interface
*/
override def copy (): Row =
{
clone().asInstanceOf[Row]
}
override def export_fields: String =
{
implicit val s: StringBuilder = new StringBuilder(sup.export_fields)
implicit val clz: String = LoadAggregate.cls
def emitattr (position: Int, value: Any): Unit = if (mask(position)) emit_attribute(LoadAggregate.fields(position), value)
emitattr(0, LoadMotor)
emitattr(1, LoadStatic)
s.toString
}
override def export: String =
{
"\\t<cim:LoadAggregate rdf:%s=\\"%s\\">\\n%s\\t</cim:LoadAggregate>".format(if (about) "about" else "ID", id, export_fields)
}
}
object LoadAggregate
extends
CIMParseable[LoadAggregate]
{
override val fields: Array[String] = Array[String](
"LoadMotor",
"LoadStatic"
)
override val relations: List[CIMRelationship] = List(
CIMRelationship("LoadMotor", "LoadMotor", "0..1", "1"),
CIMRelationship("LoadStatic", "LoadStatic", "0..1", "1")
)
val LoadMotor: Fielder = parse_attribute(attribute(cls, fields(0)))
val LoadStatic: Fielder = parse_attribute(attribute(cls, fields(1)))
def parse (context: CIMContext): LoadAggregate =
{
implicit val ctx: CIMContext = context
implicit val bitfields: Array[Int] = Array(0)
val ret = LoadAggregate(
LoadDynamics.parse(context),
mask(LoadMotor(), 0),
mask(LoadStatic(), 1)
)
ret.bitfields = bitfields
ret
}
def serializer: Serializer[LoadAggregate] = LoadAggregateSerializer
}
object LoadAggregateSerializer extends CIMSerializer[LoadAggregate]
{
def write (kryo: Kryo, output: Output, obj: LoadAggregate): Unit =
{
val toSerialize: Array[() => Unit] = Array(
() => output.writeString(obj.LoadMotor),
() => output.writeString(obj.LoadStatic)
)
LoadDynamicsSerializer.write(kryo, output, obj.sup)
implicit val bitfields: Array[Int] = obj.bitfields
writeBitfields(output)
writeFields(toSerialize)
}
def read (kryo: Kryo, input: Input, cls: Class[LoadAggregate]): LoadAggregate =
{
val parent = LoadDynamicsSerializer.read(kryo, input, classOf[LoadDynamics])
implicit val bitfields: Array[Int] = readBitfields(input)
val obj = LoadAggregate(
parent,
if (isSet(0)) input.readString else null,
if (isSet(1)) input.readString else null
)
obj.bitfields = bitfields
obj
}
}
/**
* Combined static load and induction motor load effects.
*
* The dynamics of the motor are simplified by linearizing the induction machine equations.
*
* @param LoadDynamics [[ch.ninecode.model.LoadDynamics LoadDynamics]] Reference to the superclass object.
* @param epfd Active load-frequency dependence index (dynamic) (<i>Epfd</i>).
* Typical value = 1,5.
* @param epfs Active load-frequency dependence index (static) (<i>Epfs</i>).
* Typical value = 1,5.
* @param epvd Active load-voltage dependence index (dynamic) (<i>Epvd</i>).
* Typical value = 0,7.
* @param epvs Active load-voltage dependence index (static) (<i>Epvs</i>).
* Typical value = 0,7.
* @param eqfd Reactive load-frequency dependence index (dynamic) (<i>Eqfd</i>).
* Typical value = 0.
* @param eqfs Reactive load-frequency dependence index (static) (<i>Eqfs</i>).
* Typical value = 0.
* @param eqvd Reactive load-voltage dependence index (dynamic) (<i>Eqvd</i>).
* Typical value = 2.
* @param eqvs Reactive load-voltage dependence index (static) (<i>Eqvs</i>).
* Typical value = 2.
* @param h Inertia constant (<i>H</i>) (>= 0).
* Typical value = 2,5.
* @param lfac Loading factor (<i>L</i><i><sub>fac</sub></i>).
* The ratio of initial <i>P</i> to motor MVA base. Typical value = 0,8.
* @param pfrac Fraction of constant-power load to be represented by this motor model (<i>P</i><i><sub>FRAC</sub></i>) (>= 0,0 and <= 1,0).
* Typical value = 0,5.
* @group LoadDynamics
* @groupname LoadDynamics Package LoadDynamics
* @groupdesc LoadDynamics Dynamic load models are used to represent the dynamic real and reactive load behaviour of a load from the static power flow model.
* Dynamic load models can be defined as applying either to a single load (energy consumer) or to a group of energy consumers.
* Large industrial motors or groups of similar motors can be represented by a synchronous machine model (SynchronousMachineDynamics) or an asynchronous machine model (AsynchronousMachineDynamics), which are usually represented as generators with negative active power output in the static (power flow) data.
*/
final case class LoadComposite
(
LoadDynamics: LoadDynamics = null,
epfd: Double = 0.0,
epfs: Double = 0.0,
epvd: Double = 0.0,
epvs: Double = 0.0,
eqfd: Double = 0.0,
eqfs: Double = 0.0,
eqvd: Double = 0.0,
eqvs: Double = 0.0,
h: Double = 0.0,
lfac: Double = 0.0,
pfrac: Double = 0.0
)
extends
Element
{
/**
* Return the superclass object.
*
* @return The typed superclass nested object.
* @group Hierarchy
* @groupname Hierarchy Class Hierarchy Related
* @groupdesc Hierarchy Members related to the nested hierarchy of CIM classes.
*/
override def sup: LoadDynamics = LoadDynamics
//
// Row overrides
//
/**
* Return a copy of this object as a Row.
*
* Creates a clone of this object for use in Row manipulations.
*
* @return The copy of the object.
* @group Row
* @groupname Row SQL Row Implementation
* @groupdesc Row Members related to implementing the SQL Row interface
*/
override def copy (): Row =
{
clone().asInstanceOf[Row]
}
override def export_fields: String =
{
implicit val s: StringBuilder = new StringBuilder(sup.export_fields)
implicit val clz: String = LoadComposite.cls
def emitelem (position: Int, value: Any): Unit = if (mask(position)) emit_element(LoadComposite.fields(position), value)
emitelem(0, epfd)
emitelem(1, epfs)
emitelem(2, epvd)
emitelem(3, epvs)
emitelem(4, eqfd)
emitelem(5, eqfs)
emitelem(6, eqvd)
emitelem(7, eqvs)
emitelem(8, h)
emitelem(9, lfac)
emitelem(10, pfrac)
s.toString
}
override def export: String =
{
"\\t<cim:LoadComposite rdf:%s=\\"%s\\">\\n%s\\t</cim:LoadComposite>".format(if (about) "about" else "ID", id, export_fields)
}
}
object LoadComposite
extends
CIMParseable[LoadComposite]
{
override val fields: Array[String] = Array[String](
"epfd",
"epfs",
"epvd",
"epvs",
"eqfd",
"eqfs",
"eqvd",
"eqvs",
"h",
"lfac",
"pfrac"
)
val epfd: Fielder = parse_element(element(cls, fields(0)))
val epfs: Fielder = parse_element(element(cls, fields(1)))
val epvd: Fielder = parse_element(element(cls, fields(2)))
val epvs: Fielder = parse_element(element(cls, fields(3)))
val eqfd: Fielder = parse_element(element(cls, fields(4)))
val eqfs: Fielder = parse_element(element(cls, fields(5)))
val eqvd: Fielder = parse_element(element(cls, fields(6)))
val eqvs: Fielder = parse_element(element(cls, fields(7)))
val h: Fielder = parse_element(element(cls, fields(8)))
val lfac: Fielder = parse_element(element(cls, fields(9)))
val pfrac: Fielder = parse_element(element(cls, fields(10)))
def parse (context: CIMContext): LoadComposite =
{
implicit val ctx: CIMContext = context
implicit val bitfields: Array[Int] = Array(0)
val ret = LoadComposite(
LoadDynamics.parse(context),
toDouble(mask(epfd(), 0)),
toDouble(mask(epfs(), 1)),
toDouble(mask(epvd(), 2)),
toDouble(mask(epvs(), 3)),
toDouble(mask(eqfd(), 4)),
toDouble(mask(eqfs(), 5)),
toDouble(mask(eqvd(), 6)),
toDouble(mask(eqvs(), 7)),
toDouble(mask(h(), 8)),
toDouble(mask(lfac(), 9)),
toDouble(mask(pfrac(), 10))
)
ret.bitfields = bitfields
ret
}
def serializer: Serializer[LoadComposite] = LoadCompositeSerializer
}
object LoadCompositeSerializer extends CIMSerializer[LoadComposite]
{
def write (kryo: Kryo, output: Output, obj: LoadComposite): Unit =
{
val toSerialize: Array[() => Unit] = Array(
() => output.writeDouble(obj.epfd),
() => output.writeDouble(obj.epfs),
() => output.writeDouble(obj.epvd),
() => output.writeDouble(obj.epvs),
() => output.writeDouble(obj.eqfd),
() => output.writeDouble(obj.eqfs),
() => output.writeDouble(obj.eqvd),
() => output.writeDouble(obj.eqvs),
() => output.writeDouble(obj.h),
() => output.writeDouble(obj.lfac),
() => output.writeDouble(obj.pfrac)
)
LoadDynamicsSerializer.write(kryo, output, obj.sup)
implicit val bitfields: Array[Int] = obj.bitfields
writeBitfields(output)
writeFields(toSerialize)
}
def read (kryo: Kryo, input: Input, cls: Class[LoadComposite]): LoadComposite =
{
val parent = LoadDynamicsSerializer.read(kryo, input, classOf[LoadDynamics])
implicit val bitfields: Array[Int] = readBitfields(input)
val obj = LoadComposite(
parent,
if (isSet(0)) input.readDouble else 0.0,
if (isSet(1)) input.readDouble else 0.0,
if (isSet(2)) input.readDouble else 0.0,
if (isSet(3)) input.readDouble else 0.0,
if (isSet(4)) input.readDouble else 0.0,
if (isSet(5)) input.readDouble else 0.0,
if (isSet(6)) input.readDouble else 0.0,
if (isSet(7)) input.readDouble else 0.0,
if (isSet(8)) input.readDouble else 0.0,
if (isSet(9)) input.readDouble else 0.0,
if (isSet(10)) input.readDouble else 0.0
)
obj.bitfields = bitfields
obj
}
}
/**
 * Load whose behaviour is described by reference to a standard model or by definition of a user-defined model.
 * A standard feature of dynamic load behaviour modelling is the ability to associate the same behaviour with multiple energy consumers by means of a single load definition.
*
* The load model is always applied to individual bus loads (energy consumers).
*
* @param IdentifiedObject [[ch.ninecode.model.IdentifiedObject IdentifiedObject]] Reference to the superclass object.
* @param EnergyConsumer [[ch.ninecode.model.EnergyConsumer EnergyConsumer]] Energy consumer to which this dynamics load model applies.
* @group LoadDynamics
* @groupname LoadDynamics Package LoadDynamics
* @groupdesc LoadDynamics Dynamic load models are used to represent the dynamic real and reactive load behaviour of a load from the static power flow model.
* Dynamic load models can be defined as applying either to a single load (energy consumer) or to a group of energy consumers.
* Large industrial motors or groups of similar motors can be represented by a synchronous machine model (SynchronousMachineDynamics) or an asynchronous machine model (AsynchronousMachineDynamics), which are usually represented as generators with negative active power output in the static (power flow) data.
*/
final case class LoadDynamics
(
IdentifiedObject: IdentifiedObject = null,
EnergyConsumer: List[String] = null
)
extends
Element
{
/**
* Return the superclass object.
*
* @return The typed superclass nested object.
* @group Hierarchy
* @groupname Hierarchy Class Hierarchy Related
* @groupdesc Hierarchy Members related to the nested hierarchy of CIM classes.
*/
override def sup: IdentifiedObject = IdentifiedObject
//
// Row overrides
//
/**
* Return a copy of this object as a Row.
*
* Creates a clone of this object for use in Row manipulations.
*
* @return The copy of the object.
* @group Row
* @groupname Row SQL Row Implementation
* @groupdesc Row Members related to implementing the SQL Row interface
*/
override def copy (): Row =
{
clone().asInstanceOf[Row]
}
override def export_fields: String =
{
implicit val s: StringBuilder = new StringBuilder(sup.export_fields)
implicit val clz: String = LoadDynamics.cls
def emitattrs (position: Int, value: List[String]): Unit = if (mask(position) && (null != value)) value.foreach(x => emit_attribute(LoadDynamics.fields(position), x))
emitattrs(0, EnergyConsumer)
s.toString
}
override def export: String =
{
"\\t<cim:LoadDynamics rdf:%s=\\"%s\\">\\n%s\\t</cim:LoadDynamics>".format(if (about) "about" else "ID", id, export_fields)
}
}
object LoadDynamics
extends
CIMParseable[LoadDynamics]
{
override val fields: Array[String] = Array[String](
"EnergyConsumer"
)
override val relations: List[CIMRelationship] = List(
CIMRelationship("EnergyConsumer", "EnergyConsumer", "0..*", "0..1")
)
val EnergyConsumer: FielderMultiple = parse_attributes(attribute(cls, fields(0)))
def parse (context: CIMContext): LoadDynamics =
{
implicit val ctx: CIMContext = context
implicit val bitfields: Array[Int] = Array(0)
val ret = LoadDynamics(
IdentifiedObject.parse(context),
masks(EnergyConsumer(), 0)
)
ret.bitfields = bitfields
ret
}
def serializer: Serializer[LoadDynamics] = LoadDynamicsSerializer
}
object LoadDynamicsSerializer extends CIMSerializer[LoadDynamics]
{
def write (kryo: Kryo, output: Output, obj: LoadDynamics): Unit =
{
val toSerialize: Array[() => Unit] = Array(
() => writeList(obj.EnergyConsumer, output)
)
IdentifiedObjectSerializer.write(kryo, output, obj.sup)
implicit val bitfields: Array[Int] = obj.bitfields
writeBitfields(output)
writeFields(toSerialize)
}
def read (kryo: Kryo, input: Input, cls: Class[LoadDynamics]): LoadDynamics =
{
val parent = IdentifiedObjectSerializer.read(kryo, input, classOf[IdentifiedObject])
implicit val bitfields: Array[Int] = readBitfields(input)
val obj = LoadDynamics(
parent,
if (isSet(0)) readList(input) else null
)
obj.bitfields = bitfields
obj
}
}
/**
* Generic non-linear dynamic (GNLD) load.
*
* This model can be used in mid-term and long-term voltage stability simulations (i.e., to study voltage collapse), as it can replace a more detailed representation of aggregate load, including induction motors, thermostatically controlled and static loads.
*
* @param LoadDynamics [[ch.ninecode.model.LoadDynamics LoadDynamics]] Reference to the superclass object.
* @param bs Steady state voltage index for reactive power (<i>BS</i>).
* @param bt Transient voltage index for reactive power (<i>BT</i>).
* @param genericNonLinearLoadModelType Type of generic non-linear load model.
* @param ls Steady state voltage index for active power (<i>LS</i>).
* @param lt Transient voltage index for active power (<i>LT</i>).
* @param tp Time constant of lag function of active power (<i>T</i><i><sub>P</sub></i>) (> 0).
* @param tq Time constant of lag function of reactive power (<i>T</i><i><sub>Q</sub></i>) (> 0).
* @group LoadDynamics
* @groupname LoadDynamics Package LoadDynamics
* @groupdesc LoadDynamics Dynamic load models are used to represent the dynamic real and reactive load behaviour of a load from the static power flow model.
* Dynamic load models can be defined as applying either to a single load (energy consumer) or to a group of energy consumers.
* Large industrial motors or groups of similar motors can be represented by a synchronous machine model (SynchronousMachineDynamics) or an asynchronous machine model (AsynchronousMachineDynamics), which are usually represented as generators with negative active power output in the static (power flow) data.
*/
final case class LoadGenericNonLinear
(
LoadDynamics: LoadDynamics = null,
bs: Double = 0.0,
bt: Double = 0.0,
genericNonLinearLoadModelType: String = null,
ls: Double = 0.0,
lt: Double = 0.0,
tp: Double = 0.0,
tq: Double = 0.0
)
extends
Element
{
/**
* Return the superclass object.
*
* @return The typed superclass nested object.
* @group Hierarchy
* @groupname Hierarchy Class Hierarchy Related
* @groupdesc Hierarchy Members related to the nested hierarchy of CIM classes.
*/
override def sup: LoadDynamics = LoadDynamics
//
// Row overrides
//
/**
* Return a copy of this object as a Row.
*
* Creates a clone of this object for use in Row manipulations.
*
* @return The copy of the object.
* @group Row
* @groupname Row SQL Row Implementation
* @groupdesc Row Members related to implementing the SQL Row interface
*/
override def copy (): Row =
{
clone().asInstanceOf[Row]
}
override def export_fields: String =
{
implicit val s: StringBuilder = new StringBuilder(sup.export_fields)
implicit val clz: String = LoadGenericNonLinear.cls
def emitelem (position: Int, value: Any): Unit = if (mask(position)) emit_element(LoadGenericNonLinear.fields(position), value)
def emitattr (position: Int, value: Any): Unit = if (mask(position)) emit_attribute(LoadGenericNonLinear.fields(position), value)
emitelem(0, bs)
emitelem(1, bt)
emitattr(2, genericNonLinearLoadModelType)
emitelem(3, ls)
emitelem(4, lt)
emitelem(5, tp)
emitelem(6, tq)
s.toString
}
override def export: String =
{
"\\t<cim:LoadGenericNonLinear rdf:%s=\\"%s\\">\\n%s\\t</cim:LoadGenericNonLinear>".format(if (about) "about" else "ID", id, export_fields)
}
}
object LoadGenericNonLinear
extends
CIMParseable[LoadGenericNonLinear]
{
override val fields: Array[String] = Array[String](
"bs",
"bt",
"genericNonLinearLoadModelType",
"ls",
"lt",
"tp",
"tq"
)
val bs: Fielder = parse_element(element(cls, fields(0)))
val bt: Fielder = parse_element(element(cls, fields(1)))
val genericNonLinearLoadModelType: Fielder = parse_attribute(attribute(cls, fields(2)))
val ls: Fielder = parse_element(element(cls, fields(3)))
val lt: Fielder = parse_element(element(cls, fields(4)))
val tp: Fielder = parse_element(element(cls, fields(5)))
val tq: Fielder = parse_element(element(cls, fields(6)))
def parse (context: CIMContext): LoadGenericNonLinear =
{
implicit val ctx: CIMContext = context
implicit val bitfields: Array[Int] = Array(0)
val ret = LoadGenericNonLinear(
LoadDynamics.parse(context),
toDouble(mask(bs(), 0)),
toDouble(mask(bt(), 1)),
mask(genericNonLinearLoadModelType(), 2),
toDouble(mask(ls(), 3)),
toDouble(mask(lt(), 4)),
toDouble(mask(tp(), 5)),
toDouble(mask(tq(), 6))
)
ret.bitfields = bitfields
ret
}
def serializer: Serializer[LoadGenericNonLinear] = LoadGenericNonLinearSerializer
}
object LoadGenericNonLinearSerializer extends CIMSerializer[LoadGenericNonLinear]
{
def write (kryo: Kryo, output: Output, obj: LoadGenericNonLinear): Unit =
{
val toSerialize: Array[() => Unit] = Array(
() => output.writeDouble(obj.bs),
() => output.writeDouble(obj.bt),
() => output.writeString(obj.genericNonLinearLoadModelType),
() => output.writeDouble(obj.ls),
() => output.writeDouble(obj.lt),
() => output.writeDouble(obj.tp),
() => output.writeDouble(obj.tq)
)
LoadDynamicsSerializer.write(kryo, output, obj.sup)
implicit val bitfields: Array[Int] = obj.bitfields
writeBitfields(output)
writeFields(toSerialize)
}
def read (kryo: Kryo, input: Input, cls: Class[LoadGenericNonLinear]): LoadGenericNonLinear =
{
val parent = LoadDynamicsSerializer.read(kryo, input, classOf[LoadDynamics])
implicit val bitfields: Array[Int] = readBitfields(input)
val obj = LoadGenericNonLinear(
parent,
if (isSet(0)) input.readDouble else 0.0,
if (isSet(1)) input.readDouble else 0.0,
if (isSet(2)) input.readString else null,
if (isSet(3)) input.readDouble else 0.0,
if (isSet(4)) input.readDouble else 0.0,
if (isSet(5)) input.readDouble else 0.0,
if (isSet(6)) input.readDouble else 0.0
)
obj.bitfields = bitfields
obj
}
}
/**
* Aggregate induction motor load.
*
* This model is used to represent a fraction of an ordinary load as "induction motor load". It allows a load that is treated as an ordinary constant power in power flow analysis to be represented by an induction motor in dynamic simulation. This model is intended for representation of aggregations of many motors dispersed through a load represented at a high voltage bus but where there is no information on the characteristics of individual motors.
* Either a "one-cage" or "two-cage" model of the induction machine can be modelled. Magnetic saturation is not modelled.
* This model treats a fraction of the constant power part of a load as a motor. During initialisation, the initial power drawn by the motor is set equal to <i>Pfrac</i> times the constant <i>P</i> part of the static load. The remainder of the load is left as a static load.
* The reactive power demand of the motor is calculated during initialisation as a function of voltage at the load bus. This reactive power demand can be less than or greater than the constant <i>Q</i> component of the load. If the motor's reactive demand is greater than the constant <i>Q</i> component of the load, the model inserts a shunt capacitor at the terminal of the motor to bring its reactive demand down to equal the constant <i>Q</i> reactive load.
* If an induction motor load model and a static load model are both present for a load, the motor <i>Pfrac</i> is assumed to be subtracted from the power flow constant <i>P</i> load before the static load model is applied. The remainder of the load, if any, is then represented by the static load model.
*
* @param IdentifiedObject [[ch.ninecode.model.IdentifiedObject IdentifiedObject]] Reference to the superclass object.
* @param d Damping factor (<i>D</i>).
* Unit = delta <i>P</i>/delta speed. Typical value = 2.
* @param h Inertia constant (<i>H</i>) (>= 0).
* Typical value = 0,4.
* @param lfac Loading factor (<i>Lfac</i>).
* The ratio of initial <i>P</i> to motor MVA base. Typical value = 0,8.
* @param lp Transient reactance (<i>Lp</i>).
* Typical value = 0,15.
* @param lpp Subtransient reactance (<i>Lpp</i>).
* Typical value = 0,15.
* @param ls Synchronous reactance (<i>Ls</i>).
* Typical value = 3,2.
* @param pfrac Fraction of constant-power load to be represented by this motor model (<i>Pfrac</i>) (>= 0,0 and <= 1,0).
* Typical value = 0,3.
* @param ra Stator resistance (<i>Ra</i>).
* Typical value = 0.
* @param tbkr Circuit breaker operating time (<i>Tbkr</i>) (>= 0).
* Typical value = 0,08.
* @param tpo Transient rotor time constant (<i>Tpo</i>) (>= 0).
* Typical value = 1.
* @param tppo Subtransient rotor time constant (<i>Tppo</i>) (>= 0).
* Typical value = 0,02.
* @param tv Voltage trip pickup time (<i>Tv</i>) (>= 0).
* Typical value = 0,1.
* @param vt Voltage threshold for tripping (<i>Vt</i>).
* Typical value = 0,7.
* @param LoadAggregate [[ch.ninecode.model.LoadAggregate LoadAggregate]] Aggregate load to which this aggregate motor (dynamic) load belongs.
* @group LoadDynamics
* @groupname LoadDynamics Package LoadDynamics
* @groupdesc LoadDynamics Dynamic load models are used to represent the dynamic real and reactive load behaviour of a load from the static power flow model.
* Dynamic load models can be defined as applying either to a single load (energy consumer) or to a group of energy consumers.
* Large industrial motors or groups of similar motors can be represented by a synchronous machine model (SynchronousMachineDynamics) or an asynchronous machine model (AsynchronousMachineDynamics), which are usually represented as generators with negative active power output in the static (power flow) data.
*/
final case class LoadMotor
(
IdentifiedObject: IdentifiedObject = null,
d: Double = 0.0,
h: Double = 0.0,
lfac: Double = 0.0,
lp: Double = 0.0,
lpp: Double = 0.0,
ls: Double = 0.0,
pfrac: Double = 0.0,
ra: Double = 0.0,
tbkr: Double = 0.0,
tpo: Double = 0.0,
tppo: Double = 0.0,
tv: Double = 0.0,
vt: Double = 0.0,
LoadAggregate: String = null
)
extends
Element
{
/**
* Return the superclass object.
*
* @return The typed superclass nested object.
* @group Hierarchy
* @groupname Hierarchy Class Hierarchy Related
* @groupdesc Hierarchy Members related to the nested hierarchy of CIM classes.
*/
override def sup: IdentifiedObject = IdentifiedObject
//
// Row overrides
//
/**
* Return a copy of this object as a Row.
*
* Creates a clone of this object for use in Row manipulations.
*
* @return The copy of the object.
* @group Row
* @groupname Row SQL Row Implementation
* @groupdesc Row Members related to implementing the SQL Row interface
*/
override def copy (): Row =
{
clone().asInstanceOf[Row]
}
override def export_fields: String =
{
implicit val s: StringBuilder = new StringBuilder(sup.export_fields)
implicit val clz: String = LoadMotor.cls
def emitelem (position: Int, value: Any): Unit = if (mask(position)) emit_element(LoadMotor.fields(position), value)
def emitattr (position: Int, value: Any): Unit = if (mask(position)) emit_attribute(LoadMotor.fields(position), value)
emitelem(0, d)
emitelem(1, h)
emitelem(2, lfac)
emitelem(3, lp)
emitelem(4, lpp)
emitelem(5, ls)
emitelem(6, pfrac)
emitelem(7, ra)
emitelem(8, tbkr)
emitelem(9, tpo)
emitelem(10, tppo)
emitelem(11, tv)
emitelem(12, vt)
emitattr(13, LoadAggregate)
s.toString
}
override def export: String =
{
"\\t<cim:LoadMotor rdf:%s=\\"%s\\">\\n%s\\t</cim:LoadMotor>".format(if (about) "about" else "ID", id, export_fields)
}
}
object LoadMotor
extends
CIMParseable[LoadMotor]
{
override val fields: Array[String] = Array[String](
"d",
"h",
"lfac",
"lp",
"lpp",
"ls",
"pfrac",
"ra",
"tbkr",
"tpo",
"tppo",
"tv",
"vt",
"LoadAggregate"
)
override val relations: List[CIMRelationship] = List(
CIMRelationship("LoadAggregate", "LoadAggregate", "1", "0..1")
)
val d: Fielder = parse_element(element(cls, fields(0)))
val h: Fielder = parse_element(element(cls, fields(1)))
val lfac: Fielder = parse_element(element(cls, fields(2)))
val lp: Fielder = parse_element(element(cls, fields(3)))
val lpp: Fielder = parse_element(element(cls, fields(4)))
val ls: Fielder = parse_element(element(cls, fields(5)))
val pfrac: Fielder = parse_element(element(cls, fields(6)))
val ra: Fielder = parse_element(element(cls, fields(7)))
val tbkr: Fielder = parse_element(element(cls, fields(8)))
val tpo: Fielder = parse_element(element(cls, fields(9)))
val tppo: Fielder = parse_element(element(cls, fields(10)))
val tv: Fielder = parse_element(element(cls, fields(11)))
val vt: Fielder = parse_element(element(cls, fields(12)))
val LoadAggregate: Fielder = parse_attribute(attribute(cls, fields(13)))
def parse (context: CIMContext): LoadMotor =
{
implicit val ctx: CIMContext = context
implicit val bitfields: Array[Int] = Array(0)
val ret = LoadMotor(
IdentifiedObject.parse(context),
toDouble(mask(d(), 0)),
toDouble(mask(h(), 1)),
toDouble(mask(lfac(), 2)),
toDouble(mask(lp(), 3)),
toDouble(mask(lpp(), 4)),
toDouble(mask(ls(), 5)),
toDouble(mask(pfrac(), 6)),
toDouble(mask(ra(), 7)),
toDouble(mask(tbkr(), 8)),
toDouble(mask(tpo(), 9)),
toDouble(mask(tppo(), 10)),
toDouble(mask(tv(), 11)),
toDouble(mask(vt(), 12)),
mask(LoadAggregate(), 13)
)
ret.bitfields = bitfields
ret
}
def serializer: Serializer[LoadMotor] = LoadMotorSerializer
}
object LoadMotorSerializer extends CIMSerializer[LoadMotor]
{
def write (kryo: Kryo, output: Output, obj: LoadMotor): Unit =
{
val toSerialize: Array[() => Unit] = Array(
() => output.writeDouble(obj.d),
() => output.writeDouble(obj.h),
() => output.writeDouble(obj.lfac),
() => output.writeDouble(obj.lp),
() => output.writeDouble(obj.lpp),
() => output.writeDouble(obj.ls),
() => output.writeDouble(obj.pfrac),
() => output.writeDouble(obj.ra),
() => output.writeDouble(obj.tbkr),
() => output.writeDouble(obj.tpo),
() => output.writeDouble(obj.tppo),
() => output.writeDouble(obj.tv),
() => output.writeDouble(obj.vt),
() => output.writeString(obj.LoadAggregate)
)
IdentifiedObjectSerializer.write(kryo, output, obj.sup)
implicit val bitfields: Array[Int] = obj.bitfields
writeBitfields(output)
writeFields(toSerialize)
}
def read (kryo: Kryo, input: Input, cls: Class[LoadMotor]): LoadMotor =
{
val parent = IdentifiedObjectSerializer.read(kryo, input, classOf[IdentifiedObject])
implicit val bitfields: Array[Int] = readBitfields(input)
val obj = LoadMotor(
parent,
if (isSet(0)) input.readDouble else 0.0,
if (isSet(1)) input.readDouble else 0.0,
if (isSet(2)) input.readDouble else 0.0,
if (isSet(3)) input.readDouble else 0.0,
if (isSet(4)) input.readDouble else 0.0,
if (isSet(5)) input.readDouble else 0.0,
if (isSet(6)) input.readDouble else 0.0,
if (isSet(7)) input.readDouble else 0.0,
if (isSet(8)) input.readDouble else 0.0,
if (isSet(9)) input.readDouble else 0.0,
if (isSet(10)) input.readDouble else 0.0,
if (isSet(11)) input.readDouble else 0.0,
if (isSet(12)) input.readDouble else 0.0,
if (isSet(13)) input.readString else null
)
obj.bitfields = bitfields
obj
}
}
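/**
 * Illustrative sketch (not part of the CIM model code): the LoadMotor documentation above says
 * that at initialisation the motor draws <i>Pfrac</i> times the constant-power part of the load
 * and the remainder stays with the static load model. The helper below only restates that
 * arithmetic; its name and the example figures are hypothetical.
 */
object LoadMotorInitialisationSketch
{
    /** Split a constant-power load into the motor part and the remaining static part. */
    def split (constantP: Double, pfrac: Double): (Double, Double) =
    {
        val motorP = pfrac * constantP // power initially drawn by the motor
        val staticP = (1.0 - pfrac) * constantP // remainder left as static load
        (motorP, staticP)
    }
    // e.g. split(100.0, 0.3) == (30.0, 70.0)
}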
/**
* General static load.
*
* This model represents the sensitivity of the real and reactive power consumed by the load to the amplitude and frequency of the bus voltage.
*
* @param IdentifiedObject [[ch.ninecode.model.IdentifiedObject IdentifiedObject]] Reference to the superclass object.
* @param ep1 First term voltage exponent for active power (<i>Ep1</i>).
* Used only when .staticLoadModelType = exponential.
* @param ep2 Second term voltage exponent for active power (<i>Ep2</i>).
* Used only when .staticLoadModelType = exponential.
* @param ep3 Third term voltage exponent for active power (<i>Ep3</i>).
* Used only when .staticLoadModelType = exponential.
* @param eq1 First term voltage exponent for reactive power (<i>Eq1</i>).
* Used only when .staticLoadModelType = exponential.
* @param eq2 Second term voltage exponent for reactive power (<i>Eq2</i>).
* Used only when .staticLoadModelType = exponential.
* @param eq3 Third term voltage exponent for reactive power (<i>Eq3</i>).
* Used only when .staticLoadModelType = exponential.
* @param kp1 First term voltage coefficient for active power (<i>K</i><i><sub>p1</sub></i>).
* Not used when .staticLoadModelType = constantZ.
* @param kp2 Second term voltage coefficient for active power (<i>K</i><i><sub>p2</sub></i>).
* Not used when .staticLoadModelType = constantZ.
* @param kp3 Third term voltage coefficient for active power (<i>K</i><i><sub>p3</sub></i>).
* Not used when .staticLoadModelType = constantZ.
* @param kp4 Frequency coefficient for active power (<i>K</i><i><sub>p4</sub></i>) (not = 0 if .staticLoadModelType = zIP2).
* Used only when .staticLoadModelType = zIP2.
* @param kpf Frequency deviation coefficient for active power (<i>K</i><i><sub>pf</sub></i>).
* Not used when .staticLoadModelType = constantZ.
* @param kq1 First term voltage coefficient for reactive power (<i>K</i><i><sub>q1</sub></i>).
* Not used when .staticLoadModelType = constantZ.
* @param kq2 Second term voltage coefficient for reactive power (<i>K</i><i><sub>q2</sub></i>).
* Not used when .staticLoadModelType = constantZ.
* @param kq3 Third term voltage coefficient for reactive power (<i>K</i><i><sub>q3</sub></i>).
* Not used when .staticLoadModelType = constantZ.
* @param kq4 Frequency coefficient for reactive power (<i>K</i><i><sub>q4</sub></i>) (not = 0 when .staticLoadModelType = zIP2).
 *        Used only when .staticLoadModelType = zIP2.
* @param kqf Frequency deviation coefficient for reactive power (<i>K</i><i><sub>qf</sub></i>).
* Not used when .staticLoadModelType = constantZ.
* @param staticLoadModelType Type of static load model.
* Typical value = constantZ.
* @param LoadAggregate [[ch.ninecode.model.LoadAggregate LoadAggregate]] Aggregate load to which this aggregate static load belongs.
* @group LoadDynamics
* @groupname LoadDynamics Package LoadDynamics
* @groupdesc LoadDynamics Dynamic load models are used to represent the dynamic real and reactive load behaviour of a load from the static power flow model.
* Dynamic load models can be defined as applying either to a single load (energy consumer) or to a group of energy consumers.
* Large industrial motors or groups of similar motors can be represented by a synchronous machine model (SynchronousMachineDynamics) or an asynchronous machine model (AsynchronousMachineDynamics), which are usually represented as generators with negative active power output in the static (power flow) data.
*/
final case class LoadStatic
(
IdentifiedObject: IdentifiedObject = null,
ep1: Double = 0.0,
ep2: Double = 0.0,
ep3: Double = 0.0,
eq1: Double = 0.0,
eq2: Double = 0.0,
eq3: Double = 0.0,
kp1: Double = 0.0,
kp2: Double = 0.0,
kp3: Double = 0.0,
kp4: Double = 0.0,
kpf: Double = 0.0,
kq1: Double = 0.0,
kq2: Double = 0.0,
kq3: Double = 0.0,
kq4: Double = 0.0,
kqf: Double = 0.0,
staticLoadModelType: String = null,
LoadAggregate: String = null
)
extends
Element
{
/**
* Return the superclass object.
*
* @return The typed superclass nested object.
* @group Hierarchy
* @groupname Hierarchy Class Hierarchy Related
* @groupdesc Hierarchy Members related to the nested hierarchy of CIM classes.
*/
override def sup: IdentifiedObject = IdentifiedObject
//
// Row overrides
//
/**
* Return a copy of this object as a Row.
*
* Creates a clone of this object for use in Row manipulations.
*
* @return The copy of the object.
* @group Row
* @groupname Row SQL Row Implementation
* @groupdesc Row Members related to implementing the SQL Row interface
*/
override def copy (): Row =
{
clone().asInstanceOf[Row]
}
override def export_fields: String =
{
implicit val s: StringBuilder = new StringBuilder(sup.export_fields)
implicit val clz: String = LoadStatic.cls
def emitelem (position: Int, value: Any): Unit = if (mask(position)) emit_element(LoadStatic.fields(position), value)
def emitattr (position: Int, value: Any): Unit = if (mask(position)) emit_attribute(LoadStatic.fields(position), value)
emitelem(0, ep1)
emitelem(1, ep2)
emitelem(2, ep3)
emitelem(3, eq1)
emitelem(4, eq2)
emitelem(5, eq3)
emitelem(6, kp1)
emitelem(7, kp2)
emitelem(8, kp3)
emitelem(9, kp4)
emitelem(10, kpf)
emitelem(11, kq1)
emitelem(12, kq2)
emitelem(13, kq3)
emitelem(14, kq4)
emitelem(15, kqf)
emitattr(16, staticLoadModelType)
emitattr(17, LoadAggregate)
s.toString
}
override def export: String =
{
"\\t<cim:LoadStatic rdf:%s=\\"%s\\">\\n%s\\t</cim:LoadStatic>".format(if (about) "about" else "ID", id, export_fields)
}
}
object LoadStatic
extends
CIMParseable[LoadStatic]
{
override val fields: Array[String] = Array[String](
"ep1",
"ep2",
"ep3",
"eq1",
"eq2",
"eq3",
"kp1",
"kp2",
"kp3",
"kp4",
"kpf",
"kq1",
"kq2",
"kq3",
"kq4",
"kqf",
"staticLoadModelType",
"LoadAggregate"
)
override val relations: List[CIMRelationship] = List(
CIMRelationship("LoadAggregate", "LoadAggregate", "1", "0..1")
)
val ep1: Fielder = parse_element(element(cls, fields(0)))
val ep2: Fielder = parse_element(element(cls, fields(1)))
val ep3: Fielder = parse_element(element(cls, fields(2)))
val eq1: Fielder = parse_element(element(cls, fields(3)))
val eq2: Fielder = parse_element(element(cls, fields(4)))
val eq3: Fielder = parse_element(element(cls, fields(5)))
val kp1: Fielder = parse_element(element(cls, fields(6)))
val kp2: Fielder = parse_element(element(cls, fields(7)))
val kp3: Fielder = parse_element(element(cls, fields(8)))
val kp4: Fielder = parse_element(element(cls, fields(9)))
val kpf: Fielder = parse_element(element(cls, fields(10)))
val kq1: Fielder = parse_element(element(cls, fields(11)))
val kq2: Fielder = parse_element(element(cls, fields(12)))
val kq3: Fielder = parse_element(element(cls, fields(13)))
val kq4: Fielder = parse_element(element(cls, fields(14)))
val kqf: Fielder = parse_element(element(cls, fields(15)))
val staticLoadModelType: Fielder = parse_attribute(attribute(cls, fields(16)))
val LoadAggregate: Fielder = parse_attribute(attribute(cls, fields(17)))
def parse (context: CIMContext): LoadStatic =
{
implicit val ctx: CIMContext = context
implicit val bitfields: Array[Int] = Array(0)
val ret = LoadStatic(
IdentifiedObject.parse(context),
toDouble(mask(ep1(), 0)),
toDouble(mask(ep2(), 1)),
toDouble(mask(ep3(), 2)),
toDouble(mask(eq1(), 3)),
toDouble(mask(eq2(), 4)),
toDouble(mask(eq3(), 5)),
toDouble(mask(kp1(), 6)),
toDouble(mask(kp2(), 7)),
toDouble(mask(kp3(), 8)),
toDouble(mask(kp4(), 9)),
toDouble(mask(kpf(), 10)),
toDouble(mask(kq1(), 11)),
toDouble(mask(kq2(), 12)),
toDouble(mask(kq3(), 13)),
toDouble(mask(kq4(), 14)),
toDouble(mask(kqf(), 15)),
mask(staticLoadModelType(), 16),
mask(LoadAggregate(), 17)
)
ret.bitfields = bitfields
ret
}
def serializer: Serializer[LoadStatic] = LoadStaticSerializer
}
object LoadStaticSerializer extends CIMSerializer[LoadStatic]
{
def write (kryo: Kryo, output: Output, obj: LoadStatic): Unit =
{
val toSerialize: Array[() => Unit] = Array(
() => output.writeDouble(obj.ep1),
() => output.writeDouble(obj.ep2),
() => output.writeDouble(obj.ep3),
() => output.writeDouble(obj.eq1),
() => output.writeDouble(obj.eq2),
() => output.writeDouble(obj.eq3),
() => output.writeDouble(obj.kp1),
() => output.writeDouble(obj.kp2),
() => output.writeDouble(obj.kp3),
() => output.writeDouble(obj.kp4),
() => output.writeDouble(obj.kpf),
() => output.writeDouble(obj.kq1),
() => output.writeDouble(obj.kq2),
() => output.writeDouble(obj.kq3),
() => output.writeDouble(obj.kq4),
() => output.writeDouble(obj.kqf),
() => output.writeString(obj.staticLoadModelType),
() => output.writeString(obj.LoadAggregate)
)
IdentifiedObjectSerializer.write(kryo, output, obj.sup)
implicit val bitfields: Array[Int] = obj.bitfields
writeBitfields(output)
writeFields(toSerialize)
}
def read (kryo: Kryo, input: Input, cls: Class[LoadStatic]): LoadStatic =
{
val parent = IdentifiedObjectSerializer.read(kryo, input, classOf[IdentifiedObject])
implicit val bitfields: Array[Int] = readBitfields(input)
val obj = LoadStatic(
parent,
if (isSet(0)) input.readDouble else 0.0,
if (isSet(1)) input.readDouble else 0.0,
if (isSet(2)) input.readDouble else 0.0,
if (isSet(3)) input.readDouble else 0.0,
if (isSet(4)) input.readDouble else 0.0,
if (isSet(5)) input.readDouble else 0.0,
if (isSet(6)) input.readDouble else 0.0,
if (isSet(7)) input.readDouble else 0.0,
if (isSet(8)) input.readDouble else 0.0,
if (isSet(9)) input.readDouble else 0.0,
if (isSet(10)) input.readDouble else 0.0,
if (isSet(11)) input.readDouble else 0.0,
if (isSet(12)) input.readDouble else 0.0,
if (isSet(13)) input.readDouble else 0.0,
if (isSet(14)) input.readDouble else 0.0,
if (isSet(15)) input.readDouble else 0.0,
if (isSet(16)) input.readString else null,
if (isSet(17)) input.readString else null
)
obj.bitfields = bitfields
obj
}
}
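/**
 * Illustrative sketch (not part of the CIM model code): one common reading of how the three
 * voltage terms documented on LoadStatic (coefficients kp1..kp3 with exponents ep1..ep3) combine
 * in an exponential static load model, P(V) = P0 * (kp1*V^ep1 + kp2*V^ep2 + kp3*V^ep3) with V in
 * per unit. The exact normalisation and the frequency terms (kp4, kpf) are defined by the CIM
 * standard and are not asserted here; this is only an orientation aid.
 */
object LoadStaticExponentialSketch
{
    def activePower (p0: Double, v: Double, kp1: Double, ep1: Double, kp2: Double, ep2: Double, kp3: Double, ep3: Double): Double =
        p0 * (kp1 * math.pow(v, ep1) + kp2 * math.pow(v, ep2) + kp3 * math.pow(v, ep3))
}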
private[ninecode] object _LoadDynamics
{
def register: List[CIMClassInfo] =
{
List(
LoadAggregate.register,
LoadComposite.register,
LoadDynamics.register,
LoadGenericNonLinear.register,
LoadMotor.register,
LoadStatic.register
)
}
}
|
derrickoswald/CIMScala
|
CIMReader/src/main/scala/ch/ninecode/model/LoadDynamics.scala
|
Scala
|
mit
| 48,074 |
package org.vitrivr.adampro.data.index.structures.lsh.hashfunction
import org.vitrivr.adampro.data.datatypes.vector.Vector.{MathVector}
import scala.util.Random
/**
* ADAMpro
*
* Ivan Giangreco
* April 2017
*/
@SerialVersionUID(100L)
class HammingHashFunction(j: Int) extends LSHashFunction with Serializable {
  /**
    * Hashes a vector by projecting it onto the randomly chosen dimension j.
    *
    * @param v the vector to hash
    * @return the value of the vector at dimension j, truncated to an Int
    */
def hash(v: MathVector): Int = v.apply(j).toInt
}
object HammingHashFunction {
def withDimension(d: Int) = new HammingHashFunction(Random.nextInt(d))
}
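// Illustrative usage sketch (not part of the original file): a Hamming-distance LSH signature is
// usually built from several of these single-coordinate hash functions. The dimensionality (16)
// and the family size (4) below are arbitrary example values.
object HammingHashFunctionUsageSketch {
  // Each function samples one random coordinate; applying all of them to a MathVector and
  // concatenating the resulting ints gives the bucket signature for that vector.
  val hashFamily: Seq[HammingHashFunction] = (0 until 4).map(_ => HammingHashFunction.withDimension(16))
}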
|
dbisUnibas/ADAMpro
|
src/main/scala/org/vitrivr/adampro/data/index/structures/lsh/hashfunction/HammingHashFunction.scala
|
Scala
|
mit
| 528 |
package org.monarchinitiative.dosdp
import org.semanticweb.owlapi.apibinding.OWLManager
import org.semanticweb.owlapi.manchestersyntax.parser.ManchesterOWLSyntaxClassExpressionParser
import zio._
import zio.test.Assertion._
import zio.test._
object ExpressionParsingTest extends DefaultRunnableSpec {
val factory = OWLManager.getOWLDataFactory
def spec = suite("Test expression parsing")(
testM("Datatype restrictions should be parseable") {
val specifiedPrefixes = Map("ex" -> "http://example.org/")
val prefixes = specifiedPrefixes.orElse(OBOPrefixes)
val dosdp = DOSDP.empty.copy(
classes = Some(Map("population of Drosophila" -> "ex:1")),
relations = Some(Map("inheres_in" -> "ex:2")),
dataProperties = Some(Map("has_increased_mortality_rate" -> "ex:3")))
val edosdp = ExpandedDOSDP(dosdp, prefixes)
val expressionParser = new ManchesterOWLSyntaxClassExpressionParser(OWLManager.getOWLDataFactory, edosdp.checker)
val result = ZIO.effect(expressionParser.parse("'inheres_in' some ('population of Drosophila') and ('has_increased_mortality_rate' some xsd:short[>= 98])")).either
assertM(result)(isRight)
},
testM("Full IRI of owl:Thing should be parseable") {
// the issue here was that the # in the Thing IRI was commenting out the rest of the expression
val dosdp = DOSDP.empty
val checker = new DOSDPEntityChecker(dosdp, OBOPrefixes)
val expressionParser = new ManchesterOWLSyntaxClassExpressionParser(OWLManager.getOWLDataFactory, checker)
val expressionOpt = PrintfText.replaced(Some("%s and %s"),
Some(List("thing", "nothing")),
None,
Some(Map(
"thing" -> SingleValue("http://www.w3.org/2002/07/owl#Thing"),
"nothing" -> SingleValue("http://www.w3.org/2002/07/owl#Nothing"))), true)
for {
expression <- ZIO.fromOption(expressionOpt)
ce <- ZIO.effect(expressionParser.parse(expression))
} yield assert(ce)(equalTo(factory.getOWLObjectIntersectionOf(factory.getOWLThing, factory.getOWLNothing)))
}
)
}
|
INCATools/dosdp-tools
|
src/test/scala/org/monarchinitiative/dosdp/ExpressionParsingTest.scala
|
Scala
|
mit
| 2,104 |
package lila.simul
import play.api.libs.json._
import lila.common.LightUser
import lila.common.PimpedJson._
import lila.game.{ Game, GameRepo }
import lila.user.{ User, UserRepo }
final class JsonView(
getLightUser: String => Option[LightUser]) {
private def fetchGames(simul: Simul) =
if (simul.isFinished) GameRepo gamesFromSecondary simul.gameIds
else GameRepo gamesFromPrimary simul.gameIds
def apply(simul: Simul): Fu[JsObject] =
fetchGames(simul) map { games =>
val lightHost = getLightUser(simul.hostId)
Json.obj(
"id" -> simul.id,
"host" -> lightHost.map { host =>
Json.obj(
"id" -> host.id,
"username" -> host.name,
"title" -> host.title,
"rating" -> simul.hostRating,
"gameId" -> simul.hostGameId)
},
"name" -> simul.name,
"fullName" -> simul.fullName,
"variants" -> simul.variants.map(variantJson(chess.Speed(simul.clock.config.some))),
"applicants" -> simul.applicants.sortBy(-_.player.rating).map(applicantJson),
"pairings" -> simul.pairings.sortBy(-_.player.rating).map(pairingJson(games, simul.hostId)),
"isCreated" -> simul.isCreated,
"isRunning" -> simul.isRunning,
"isFinished" -> simul.isFinished,
"quote" -> lila.quote.Quote.one(simul.id))
}
private def variantJson(speed: chess.Speed)(v: chess.variant.Variant) = Json.obj(
"key" -> v.key,
"icon" -> lila.game.PerfPicker.perfType(speed, v, none).map(_.iconChar.toString),
"name" -> v.name)
private def playerJson(player: SimulPlayer) = {
val light = getLightUser(player.user)
Json.obj(
"id" -> player.user,
"variant" -> player.variant.key,
"username" -> light.map(_.name),
"title" -> light.map(_.title),
"rating" -> player.rating,
"provisional" -> player.provisional.filter(identity),
"patron" -> light.??(_.isPatron).option(true)
).noNull
}
private def applicantJson(app: SimulApplicant) = Json.obj(
"player" -> playerJson(app.player),
"accepted" -> app.accepted)
private def gameJson(hostId: String)(g: Game) = Json.obj(
"id" -> g.id,
"status" -> g.status.id,
"fen" -> (chess.format.Forsyth exportBoard g.toChess.board),
"lastMove" -> ~g.castleLastMoveTime.lastMoveString,
"orient" -> g.playerByUserId(hostId).map(_.color))
private def pairingJson(games: List[Game], hostId: String)(p: SimulPairing) = Json.obj(
"player" -> playerJson(p.player),
"hostColor" -> p.hostColor,
"winnerColor" -> p.winnerColor,
"wins" -> p.wins, // can't be normalized because BC
"game" -> games.find(_.id == p.gameId).map(gameJson(hostId))
)
private implicit val colorWriter: Writes[chess.Color] = Writes { c =>
JsString(c.name)
}
}
|
clarkerubber/lila
|
modules/simul/src/main/JsonView.scala
|
Scala
|
agpl-3.0
| 2,833 |
package com.twitter.finagle.serverset2
import collection.immutable
import com.twitter.conversions.time._
import com.twitter.finagle.MockTimer
import com.twitter.finagle.serverset2.client._
import com.twitter.io.Buf
import com.twitter.util._
import java.util.concurrent.atomic.AtomicReference
import org.junit.runner.RunWith
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
sealed private trait ZkOp { type Res; val res = new Promise[Res] }
private object ZkOp {
case class Exists(path: String) extends ZkOp {
type Res = Option[Data.Stat]
}
case class ExistsWatch(path: String) extends ZkOp {
type Res = Watched[Option[Data.Stat]]
}
case class GetChildren(path: String) extends ZkOp {
type Res = Node.Children
}
case class GetChildrenWatch(path: String) extends ZkOp {
type Res = Watched[Node.Children]
}
case class GlobWatch(pat: String) extends ZkOp {
type Res = Watched[Seq[String]]
}
case class GetData(path: String) extends ZkOp {
type Res = Node.Data
}
case class GetDataWatch(path: String) extends ZkOp {
type Res = Watched[Node.Data]
}
case class GetEphemerals() extends ZkOp {
type Res = Seq[String]
}
case class Sync(path: String) extends ZkOp {
type Res = Unit
}
case class Close(deadline: Time) extends ZkOp {
type Res = Unit
}
case class AddAuthInfo(scheme: String, auth: Buf) extends ZkOp {
type Res = Unit
}
}
private class OpqueueZkReader(
val sessionId: Long,
val sessionPasswd: Buf,
val sessionTimeout: Duration)
extends ZooKeeperReader
{
import ZkOp._
def this() = this(0, Buf.Empty, Duration.Zero)
@volatile var opq: immutable.Queue[ZkOp] = immutable.Queue.empty
private def enqueue(op: ZkOp): Future[op.Res] = synchronized {
opq = opq enqueue op
op.res
}
def exists(path: String) = enqueue(Exists(path))
def existsWatch(path: String) = enqueue(ExistsWatch(path))
def getChildren(path: String) = enqueue(GetChildren(path))
def getChildrenWatch(path: String) = enqueue(GetChildrenWatch(path))
def globPrefixWatch(pat: String) = enqueue(GlobWatch(pat))
def getData(path: String) = enqueue(GetData(path))
def getDataWatch(path: String) = enqueue(GetDataWatch(path))
def getEphemerals() = enqueue(GetEphemerals())
def sync(path: String) = enqueue(Sync(path))
def close(deadline: Time) = enqueue(Close(deadline))
def addAuthInfo(scheme: String, auth: Buf) =
enqueue(AddAuthInfo(scheme, auth))
def getACL(path: String): Future[Node.ACL] = Future.never
}
@RunWith(classOf[JUnitRunner])
class ZkSessionTest extends FunSuite with Eventually with IntegrationPatience {
import ZkOp._
test("ops retry safely") { Time.withCurrentTimeFrozen { tc =>
implicit val timer = new MockTimer
val watchedZk = Watched(new OpqueueZkReader(), Var(WatchState.Pending))
val zk = new ZkSession(watchedZk)
val v = zk.existsOf("/foo/bar")
    // An unobserved Var causes no side effects.
assert(watchedZk.value.opq.isEmpty)
val ref = new AtomicReference[Activity.State[Option[Data.Stat]]]
val o = v.states.register(Witness(ref))
assert(watchedZk.value.opq === Seq(ExistsWatch("/foo/bar")))
assert(ref.get === Activity.Pending)
assert(timer.tasks.isEmpty)
watchedZk.value.opq(0).res() = Throw(new KeeperException.ConnectionLoss(None))
assert(timer.tasks.size === 1)
tc.advance(20.milliseconds)
timer.tick()
assert(watchedZk.value.opq === Seq(ExistsWatch("/foo/bar"), ExistsWatch("/foo/bar")))
assert(ref.get === Activity.Pending)
watchedZk.value.opq(1).res() = Throw(new KeeperException.SessionExpired(None))
assert(watchedZk.value.opq === Seq(ExistsWatch("/foo/bar"), ExistsWatch("/foo/bar")))
val Activity.Failed(exc) = ref.get
assert(exc.isInstanceOf[KeeperException.SessionExpired])
}}
test("ZkSession.globOf") { Time.withCurrentTimeFrozen { tc =>
implicit val timer = new MockTimer
val watchedZk = Watched(new OpqueueZkReader(), Var(WatchState.Pending))
val zk = new ZkSession(watchedZk)
val v = zk.globOf("/foo/bar/")
val ref = new AtomicReference[Activity.State[Seq[String]]]
v.states.register(Witness(ref))
assert(ref.get === Activity.Pending)
val Seq(ew@ExistsWatch("/foo/bar")) = watchedZk.value.opq
val ewwatchv = Var[WatchState](WatchState.Pending)
ew.res() = Return(Watched(None, ewwatchv))
assert(watchedZk.value.opq === Seq(ExistsWatch("/foo/bar")))
assert(ref.get === Activity.Ok(Seq.empty))
ewwatchv() = WatchState.Determined(NodeEvent.ChildrenChanged)
val Seq(`ew`, ew2@ExistsWatch("/foo/bar")) = watchedZk.value.opq
assert(ref.get === Activity.Ok(Seq.empty))
val ew2watchv = Var[WatchState](WatchState.Pending)
ew2.res() = Return(Watched(Some(Data.Stat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)), ew2watchv))
val Seq(`ew`, `ew2`, gw@GetChildrenWatch("/foo/bar")) = watchedZk.value.opq
assert(ref.get === Activity.Pending)
gw.res() = Return(Watched(Node.Children(Seq("a", "b", "c"), null), Var.value(WatchState.Pending)))
assert(ref.get === Activity.Ok(Seq("/foo/bar/a", "/foo/bar/b", "/foo/bar/c")))
assert(watchedZk.value.opq === Seq(ew, ew2, gw))
ew2watchv() = WatchState.Determined(NodeEvent.ChildrenChanged)
val Seq(`ew`, `ew2`, `gw`, ew3@ExistsWatch("/foo/bar")) = watchedZk.value.opq
ew3.res() = Return(Watched(None, Var.value(WatchState.Pending)))
assert(ref.get === Activity.Ok(Seq.empty))
}}
test("factory authenticates and closes on expiry") { Time.withCurrentTimeFrozen { tc =>
val identity = Identities.get().head
val authInfo = "%s:%s".format(identity, identity)
implicit val timer = new MockTimer
val zkState: Var[WatchState] with Updatable[WatchState] = Var(WatchState.Pending)
val watchedZk = Watched(new OpqueueZkReader(), zkState)
val zk = ZkSession.retrying(5.seconds, () => new ZkSession(watchedZk))
zk.changes.respond {
case _ => ()
}
zkState() = WatchState.SessionState(SessionState.SyncConnected)
eventually {
assert(watchedZk.value.opq === Seq(AddAuthInfo("digest", Buf.Utf8(authInfo))))
}
zkState() = WatchState.SessionState(SessionState.Expired)
tc.advance(10.seconds)
timer.tick()
eventually {
assert(watchedZk.value.opq === Seq(
AddAuthInfo("digest", Buf.Utf8(authInfo)),
Close(Time.Bottom)
))
}
zkState() = WatchState.SessionState(SessionState.SyncConnected)
eventually {
assert(watchedZk.value.opq === Seq(
AddAuthInfo("digest", Buf.Utf8(authInfo)),
Close(Time.Bottom),
AddAuthInfo("digest", Buf.Utf8(authInfo))
))
}
}}
}
|
travisbrown/finagle
|
finagle-serversets/src/test/scala/com/twitter/finagle/serverset2/ZkSessionTest.scala
|
Scala
|
apache-2.0
| 6,780 |
/*
* Copyright (c) 2014, Brook 'redattack34' Heisler
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the ModularRayguns team nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.castlebravostudios.rayguns.utils
import scala.collection.mutable.Buffer
import com.castlebravostudios.rayguns.mod.Config
import scala.collection.SortedSet
import scala.util.Random
object MidpointDisplacement {
private val minDetail = Config.minLightningDetail
private val pregeneratedBoltLists = Vector.fill(100){
createPositionList( Vector3( 0, 0, 0 ), Vector3( 0, 0, 4 ) )
}
private val rand = new Random()
def createPositionList( start : Vector3, end : Vector3) : Seq[Vector3] = {
val buffer = Buffer[Vector3]( start.subtract(start) )
def positionRec( start : Vector3, end : Vector3, displace : Double ) : Unit = {
if ( displace < minDetail ) {
buffer += end
}
else {
val x = ( start.x + end.x ) / 2 + (Math.random() - 0.5) * displace
val y = ( start.y + end.y ) / 2 + (Math.random() - 0.5) * displace
val z = ( start.z + end.z ) / 2 + (Math.random() - 0.5) * displace
val mid = Vector3( x, y, z )
positionRec( start, mid, displace / 2 )
positionRec( mid, end, displace / 2 )
}
}
positionRec( start.subtract(start), end.subtract(start), end.subtract(start).length / 8 )
buffer.toVector
}
def getBoltList : Seq[Vector3] = pregeneratedBoltLists( rand.nextInt(100) )
}
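// Hypothetical usage sketch (not part of the original mod sources): createPositionList
// returns bolt points relative to `start` (the first element is always the origin), so a
// renderer translates them back into world space. This helper only reports the number of
// line segments a freshly displaced bolt would have.
object MidpointDisplacementUsageSketch {
  def segmentCount(start: Vector3, end: Vector3): Int =
    MidpointDisplacement.createPositionList(start, end).size - 1
}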
|
Redattack34/ModularRayguns
|
src/main/scala/com/castlebravostudios/rayguns/utils/MidpointDisplacement.scala
|
Scala
|
bsd-3-clause
| 2,905 |
/**
* © 2019 Refinitiv. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.crawler
import akka.kafka.scaladsl.Consumer
import akka.stream._
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import com.typesafe.scalalogging.LazyLogging
import org.apache.kafka.clients.consumer.ConsumerRecord
object OffsetThrottler {
def apply(crawlerId: String): OffsetThrottler = new OffsetThrottler(crawlerId)
}
class OffsetThrottler(crawlerId: String)
extends GraphStage[FanInShape2[Long, ConsumerRecord[Array[Byte], Array[Byte]], ConsumerRecord[Array[Byte], Array[Byte]]]] with LazyLogging {
private val offsetIn = Inlet[Long]("OffsetThrottler.offsetIn")
private val messageIn = Inlet[ConsumerRecord[Array[Byte], Array[Byte]]]("OffsetThrottler.messageIn")
private val messageOut = Outlet[ConsumerRecord[Array[Byte], Array[Byte]]]("OffsetThrottler.messageOut")
override val shape = new FanInShape2(offsetIn, messageIn, messageOut)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
private var pending: ConsumerRecord[Array[Byte], Array[Byte]] = _
private var maxAllowedOffset: Long = -1L
private val offsetInHandler: InHandler = new InHandler {
override def onPush(): Unit = {
maxAllowedOffset = grab(offsetIn)
//checking that the port isn't closed isn't necessary because the whole stage will be finished by then
//also, no need for isAvailable check because:
//messageOut-onPull->pull(messageIn)->pull(offsetIn)=>isAvailable(messageOut)==true
if (pending.offset <= maxAllowedOffset /* && !isClosed(messageOut)*/ ) {
logger.info(s"$crawlerId Got a new max allowed offset $maxAllowedOffset. Releasing the back pressure.")
if (pending.offset() == maxAllowedOffset)
logger.info(s"$crawlerId The current element's offset $maxAllowedOffset is the same as the max allowed one. " +
s"This means the crawler is going to handle the last infoton before horizon.")
push(messageOut, pending)
pending = null
if (isClosed(messageIn))
completeStage()
}
else {
logger.info(s"$crawlerId Got a new max allowed offset $maxAllowedOffset but the pending message has offset ${pending.offset}. " +
s"Pulling again from another max allowed offset.")
pull(offsetIn)
}
}
override def onUpstreamFinish(): Unit = {
if (isClosed(messageIn) || pending.offset > maxAllowedOffset)
completeStage()
}
override def onUpstreamFailure(ex: Throwable): Unit = {
super.onUpstreamFailure(ex)
failStage(ex)
}
}
private val initialMessageInHandler: InHandler = new InHandler {
override def onPush(): Unit = {
val elem = grab(messageIn)
pending = elem
logger.info(s"$crawlerId Initial message with offset ${pending.offset()} received - " +
s"pulling the offset source for the max allowed offset (setting back pressure)")
pull(offsetIn)
//from now on, each message we get should be checked against the maxAllowedOffset - set a new handler for the newly got messages
setHandler(messageIn, ongoingMessageInHandler)
}
override def onUpstreamFinish(): Unit = {
super.onUpstreamFinish()
completeStage()
}
override def onUpstreamFailure(ex: Throwable): Unit = {
super.onUpstreamFailure(ex)
failStage(ex)
}
}
private val ongoingMessageInHandler: InHandler = new InHandler {
override def onPush(): Unit = {
val elem = grab(messageIn)
pending = elem
if (pending.offset > maxAllowedOffset) {
logger.info(s"$crawlerId Got a message with offset ${pending.offset} that is larger than the current max allowed $maxAllowedOffset. " +
s"Pulling the offset source for a newer maxAllowedOffset (setting back pressure)")
pull(offsetIn)
}
//checking that the port isn't closed isn't necessary because the whole stage will be finished by then
else {
/* if (!isClosed(messageOut))*/
if (pending.offset() == maxAllowedOffset)
logger.info(s"$crawlerId The current element's offset $maxAllowedOffset is the same as the max allowed one. " +
s"This means the crawler is going to handle the last infoton before horizon.")
push(messageOut, pending)
pending = null
}
}
override def onUpstreamFinish(): Unit = {
if (pending == null || isClosed(offsetIn))
completeStage()
}
override def onUpstreamFailure(ex: Throwable): Unit = {
super.onUpstreamFailure(ex)
failStage(ex)
}
}
private val messageOutHandler: OutHandler = new OutHandler {
override def onPull(): Unit = {
pull(messageIn)
}
override def onDownstreamFinish(): Unit = {
super.onDownstreamFinish()
completeStage()
}
}
//initial handler allocation
setHandler(offsetIn, offsetInHandler)
setHandler(messageIn, initialMessageInHandler)
setHandler(messageOut, messageOutHandler)
}
}
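// Hypothetical wiring sketch (not part of the original CM-Well sources): shows how the
// FanInShape2 exposed by OffsetThrottler could be embedded in a graph. The two sources
// (max allowed offset updates and raw Kafka records) are assumed to be supplied by the
// caller.
object OffsetThrottlerWiringSketch {
  import akka.NotUsed
  import akka.stream.scaladsl.{GraphDSL, Source}

  def throttledRecords(
    offsetSource: Source[Long, NotUsed],
    recordSource: Source[ConsumerRecord[Array[Byte], Array[Byte]], NotUsed]
  ): Source[ConsumerRecord[Array[Byte], Array[Byte]], NotUsed] =
    Source.fromGraph(GraphDSL.create() { implicit b: GraphDSL.Builder[NotUsed] =>
      import GraphDSL.Implicits._
      val throttler = b.add(OffsetThrottler("crawler-sketch"))
      offsetSource ~> throttler.in0 // updates of the highest offset the crawler may consume
      recordSource ~> throttler.in1 // raw records, released only while at or below that offset
      SourceShape(throttler.out)
    })
}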
|
e-orz/CM-Well
|
server/cmwell-bg/src/main/scala/cmwell/crawler/OffsetThrottler.scala
|
Scala
|
apache-2.0
| 5,834 |
package com.github.bruneli.scalaopt.core.discrete
import com.github.bruneli.scalaopt.core.linalg.DenseVectorLike
import com.github.bruneli.scalaopt.core.variable.{DiscreteVariable, Variable, Variables}
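/**
 * Branching strategy for mixed-integer programs: cuts on the discrete variable whose
 * relaxed value lies farthest from a feasible discrete value, i.e. the variable with the
 * largest max(distance to its floor, distance to its ceiling).
 */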
class MostExtremeBranching extends BranchingStrategy {
import MostExtremeBranching._
def findCutVariable(program: MIP[_, _, _],
relaxationSolution: DenseVectorLike[Variable]): Option[MostExtremeBranchingCut] = {
program.discreteVariableIndices.foldLeft[Option[MostExtremeBranchingCut]](None)(updateCutVariable(relaxationSolution, program.variables))
}
private def updateCutVariable(solution: DenseVectorLike[Variable], variables: Variables[Variable])(
previous: Option[MostExtremeBranchingCut], index: Int): Option[MostExtremeBranchingCut] = {
val discreteVariable = variables(index).asInstanceOf[DiscreteVariable]
val dxLow = discreteVariable.floor(solution(index)).map(solution(index).x - _.x).getOrElse(0.0)
val dxUp = discreteVariable.ceil(solution(index)).map(_.x - solution(index).x).getOrElse(0.0)
val dx = Math.max(dxLow, dxUp)
if (previous.isEmpty || dx > previous.get.dx) {
Some(MostExtremeBranchingCut(discreteVariable, index, dx))
} else {
previous
}
}
}
object MostExtremeBranching {
case class MostExtremeBranchingCut(variable: DiscreteVariable, index: Int, dx: Double) extends BranchingCut
}
|
bruneli/scalaopt
|
core/src/main/scala/com/github/bruneli/scalaopt/core/discrete/MostExtremeBranching.scala
|
Scala
|
apache-2.0
| 1,400 |
package test
object A {
val b = new B()
println(b.a)
}
|
dragos/scala-ide
|
org.scala-ide.sdt.core.tests/test-workspace/builder-deprecation-warnings/src/test/A.scala
|
Scala
|
bsd-3-clause
| 59 |
package org.workcraft.gui.modeleditor.tools
import org.workcraft.scala.Expressions._
import java.awt.BasicStroke
import java.awt.Color
import java.awt.Graphics2D
import java.awt.Toolkit
import java.awt.event.KeyEvent
import java.awt.event.MouseEvent
import java.awt.geom.Line2D
import java.awt.geom.Point2D
import javax.swing.Icon
import org.workcraft.dependencymanager.advanced.user.Variable
import org.workcraft.exceptions.InvalidConnectionException
import scalaz._
import Scalaz._
import org.workcraft.scala.Scalaz
import org.workcraft.scala.Expressions._
import org.workcraft.graphics.GraphicalContent
import java.awt.event.InputEvent
import org.workcraft.gui.modeleditor.ToolMouseListener
import org.workcraft.gui.modeleditor.Viewport
import org.workcraft.gui.GUI
import org.workcraft.gui.modeleditor.Modifier
import org.workcraft.scala.effects.IO
import org.workcraft.scala.effects.IO._
import org.workcraft.gui.modeleditor.MouseButton
import org.workcraft.gui.modeleditor.LeftButton
import org.workcraft.gui.modeleditor.RightButton
import org.workcraft.gui.modeleditor.tools.{DummyMouseListener => DML}
import org.workcraft.graphics.Graphics
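/**
 * Generic two-click connection tool: the first click selects a source node, the second asks
 * the ConnectionManager to create the connection (control+click keeps connecting from the
 * just-connected node; right-click clears the selection). While a source is selected, a
 * preview line follows the cursor: blue over empty space, green over a valid target, and
 * red, together with a warning message, over an invalid one.
 */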
class GenericConnectionToolImpl[N](centerProvider: N => Expression[Point2D.Double],
connectionManager: ConnectionManager[N],
hitTester: Point2D.Double => IO[Option[N]]) {
private val mouseOverObject: ModifiableExpression[Option[N]] = Variable.create[Option[N]](None)
private val first: ModifiableExpression[Option[N]] = Variable.create[Option[N]](None)
private var mouseExitRequiredForSelfLoop: Boolean = true
private var leftFirst: Boolean = false
private var lastMouseCoords: ModifiableExpression[Point2D.Double] = Variable.create(new Point2D.Double)
private var warningMessage: ModifiableExpression[Option[String]] = Variable.create[Option[String]](None)
val mouseOverNode: Expression[Option[N]] = mouseOverObject
val firstNode: Expression[Option[N]] = first
def connectingLineGraphicalContent(viewport: Viewport): Expression[GraphicalContent] =
first >>= {
case None => constant(GraphicalContent.Empty)
case Some(first) => {
warningMessage.setValue(None)
mouseOverObject >>= (mouseOverObject =>
{
def zogo : Expression[(Color, Point2D.Double)] = (mouseOverObject match {
case None => lastMouseCoords.map ((Color.BLUE, _))
case Some(second) => connectionManager.connect(first, second) >>= {
case Left(err) => { warningMessage.setValue(Some(err.getMessage)); lastMouseCoords.map((Color.RED,_)) }
case Right(_) => centerProvider(second).map((Color.GREEN,_))
}})
for(
ogoz <- zogo;
p1 <- centerProvider(first);
p2 = ogoz._2;
color = ogoz._1;
px <- viewport.pixelSizeInUserSpace
) yield
(Graphics.line(p1, p2, new BasicStroke(px.getX.toFloat), color).graphicalContent)
})
}
}
val mouseListener: ToolMouseListener = new DML {
override def mouseMoved(modifiers: Set[Modifier], position: Point2D.Double): IO[Unit] =
lastMouseCoords.set(position) >>=|
hitTester(position) >>= (n => {
mouseOverObject.set(n) >>=|
(if (!leftFirst && mouseExitRequiredForSelfLoop)
first.eval >>= (f => if (f == n) mouseOverObject.set(None) else ioPure.pure { leftFirst = true })
else
IO.Empty)
})
override def buttonPressed(button: MouseButton, modifiers: Set[Modifier], position: Point2D.Double): IO[Unit] = button match {
case LeftButton => first.eval >>= {
case None => mouseOverObject.eval >>= {
case None => IO.Empty
case Some(mouseOver) => (first := mouseOverObject) >>=| ioPure.pure { leftFirst = false } >>=| mouseMoved(modifiers, position)
}
case Some(currentFirst) => {
mouseOverObject.eval >>= {
case None => IO.Empty
case Some(mouseOver) => {
connectionManager.connect(currentFirst, mouseOver).eval >>= {
case Right(connect) => {
connect >>=| (
if (modifiers.contains(Modifier.Control)) (first := mouseOverObject) >>=| mouseOverObject.set(None)
else first.set(None))
}
case Left(err) => ioPure.pure { Toolkit.getDefaultToolkit.beep }
}
}
}
}
}
case RightButton => first.set(None) >>=| mouseOverObject.set(None)
case _ => IO.Empty
}
}
def screenSpaceContent(viewport: Viewport, hasFocus: Expression[Boolean]): Expression[GraphicalContent] =
hasFocus >>= {
case false => constant(GraphicalContent.Empty)
case true => (warningMessage >>= {
case Some(msg) => constant((Color.RED, msg))
case None => first >>= {
case None => constant((Color.BLACK, "Click on the first component"))
case Some(_) => constant((Color.BLACK, "Click on the second component (control+click to connect continuously)"))
}
}) >>= (msg => GUI.editorMessage(viewport, msg._1, msg._2))
}
def deactivated = {
first.setValue(None)
mouseOverObject.setValue(None)
}
}
|
mechkg/workcraft
|
ScalaGraphEditorUtil/src/main/scala/org/workcraft/gui/modeleditor/tools/GenericConnectionToolImpl.scala
|
Scala
|
gpl-3.0
| 5,299 |
/**
* Author: Alexander Slesarenko
* Date: 7/25/12
*/
package scalan.primitives
import scalan.common.OverloadHack._
import scalan.staged.BaseExp
import scalan.{ScalanExp, Scalan, ScalanStd}
trait Tuples { self: Scalan =>
object Pair {
def apply[A, B](a: Rep[A], b: Rep[B]) = zipPair[A, B]((a, b))
def unapply[A, B](p: Rep[(A, B)]) = Some(unzipPair[A, B](p))
}
def unzipPair[A, B](p: Rep[(A, B)]): (Rep[A], Rep[B])
implicit def zipPair[A, B](p: (Rep[A], Rep[B])): Rep[(A, B)]
implicit class ListOps[A, B](t: Rep[(A, B)]) {
def head: Rep[A] = { val Pair(x, _) = t; x }
def tail: Rep[B] = { val Pair(_, x) = t; x }
}
implicit class TupleOps2[A, B](t: Rep[(A, B)]) {
def _1: Rep[A] = { val Pair(x, _) = t; x }
def _2: Rep[B] = { val Pair(_, x) = t; x }
}
implicit class TupleOps3[A,B,C](t: Rep[(A,(B,C))]) {
def _1: Rep[A] = { val Pair(x, _) = t; x }
def _2: Rep[B] = { val Pair(_, Pair(x, _)) = t; x }
def _3: Rep[C] = { val Pair(_, Pair(_, x)) = t; x }
}
implicit class TupleOps4[A,B,C,D](t: Rep[(A,(B,(C,D)))]) {
def _1: Rep[A] = { val Pair(x, _) = t; x }
def _2: Rep[B] = { val Pair(_, Pair(x, _)) = t; x }
def _3: Rep[C] = { val Pair(_, Pair(_, Pair(x, _))) = t; x }
def _4: Rep[D] = { val Pair(_, Pair(_, Pair(_, x))) = t; x }
}
implicit class TupleOps5[A,B,C,D,E](t: Rep[(A,(B,(C,(D,E))))]) {
def _1: Rep[A] = { val Pair(x, _) = t; x }
def _2: Rep[B] = { val Pair(_, Pair(x, _)) = t; x }
def _3: Rep[C] = { val Pair(_, Pair(_, Pair(x, _))) = t; x }
def _4: Rep[D] = { val Pair(_, Pair(_, Pair(_, Pair(x, _)))) = t; x }
def _5: Rep[E] = { val Pair(_, Pair(_, Pair(_, Pair(_, x)))) = t; x }
}
implicit class TupleOps6[A,B,C,D,E,F](t: Rep[(A,(B,(C,(D,(E,F)))))]) {
def _1: Rep[A] = { val Pair(x, _) = t; x }
def _2: Rep[B] = { val Pair(_, Pair(x, _)) = t; x }
def _3: Rep[C] = { val Pair(_, Pair(_, Pair(x, _))) = t; x }
def _4: Rep[D] = { val Pair(_, Pair(_, Pair(_, Pair(x, _)))) = t; x }
def _5: Rep[E] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _))))) = t; x }
def _6: Rep[F] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, x))))) = t; x }
}
implicit class TupleOps7[A,B,C,D,E,F,G](t: Rep[(A,(B,(C,(D,(E,(F,G))))))]) {
def _1: Rep[A] = { val Pair(x, _) = t; x }
def _2: Rep[B] = { val Pair(_, Pair(x, _)) = t; x }
def _3: Rep[C] = { val Pair(_, Pair(_, Pair(x, _))) = t; x }
def _4: Rep[D] = { val Pair(_, Pair(_, Pair(_, Pair(x, _)))) = t; x }
def _5: Rep[E] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _))))) = t; x }
def _6: Rep[F] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _)))))) = t; x }
def _7: Rep[G] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, x)))))) = t; x }
}
implicit class TupleOps8[A,B,C,D,E,F,G,H](t: Rep[(A,(B,(C,(D,(E,(F,(G,H)))))))]) {
def _1: Rep[A] = { val Pair(x, _) = t; x }
def _2: Rep[B] = { val Pair(_, Pair(x, _)) = t; x }
def _3: Rep[C] = { val Pair(_, Pair(_, Pair(x, _))) = t; x }
def _4: Rep[D] = { val Pair(_, Pair(_, Pair(_, Pair(x, _)))) = t; x }
def _5: Rep[E] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _))))) = t; x }
def _6: Rep[F] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _)))))) = t; x }
def _7: Rep[G] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _))))))) = t; x }
def _8: Rep[H] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, x))))))) = t; x }
}
implicit class TupleOps9[A,B,C,D,E,F,G,H,I](t: Rep[(A,(B,(C,(D,(E,(F,(G, (H, I))))))))]) {
def _1: Rep[A] = { val Pair(x, _) = t; x }
def _2: Rep[B] = { val Pair(_, Pair(x, _)) = t; x }
def _3: Rep[C] = { val Pair(_, Pair(_, Pair(x, _))) = t; x }
def _4: Rep[D] = { val Pair(_, Pair(_, Pair(_, Pair(x, _)))) = t; x }
def _5: Rep[E] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _))))) = t; x }
def _6: Rep[F] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _)))))) = t; x }
def _7: Rep[G] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _))))))) = t; x }
def _8: Rep[H] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _)))))))) = t; x }
def _9: Rep[I] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, x)))))))) = t; x }
}
implicit class TupleOps16[A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P](t: Rep[(A,(B,(C,(D,(E,(F,(G,(H,(I,(J,(K,(L,(M,(N,(O,P)))))))))))))))]) {
def _1: Rep[A] = { val Pair(x, _) = t; x }
def _2: Rep[B] = { val Pair(_, Pair(x, _)) = t; x }
def _3: Rep[C] = { val Pair(_, Pair(_, Pair(x, _))) = t; x }
def _4: Rep[D] = { val Pair(_, Pair(_, Pair(_, Pair(x, _)))) = t; x }
def _5: Rep[E] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _))))) = t; x }
def _6: Rep[F] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _)))))) = t; x }
def _7: Rep[G] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _))))))) = t; x }
def _8: Rep[H] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _)))))))) = t; x }
def _9: Rep[I] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _))))))))) = t; x }
def _10: Rep[J] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _)))))))))) = t; x }
def _11: Rep[K] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _))))))))))) = t; x }
def _12: Rep[L] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _)))))))))))) = t; x }
def _13: Rep[M] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _))))))))))))) = t; x }
def _14: Rep[N] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _)))))))))))))) = t; x }
def _15: Rep[O] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(x, _))))))))))))))) = t; x }
def _16: Rep[P] = { val Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, Pair(_, x))))))))))))))) = t; x }
}
implicit def zipTuple3[A, B, C](p: (Rep[A], Rep[B], Rep[C])): Rep[(A, (B, C))] =
Tuple(p._1, p._2, p._3)
implicit def zipTuple4[A, B, C, D](p: (Rep[A], Rep[B], Rep[C], Rep[D])): Rep[(A, (B, (C, D)))] =
Tuple(p._1, p._2, p._3, p._4)
implicit def zipTuple5[A, B, C, D, E](p: (Rep[A], Rep[B], Rep[C], Rep[D], Rep[E])): Rep[(A, (B, (C, (D, E))))] =
Tuple(p._1, p._2, p._3, p._4, p._5)
implicit def zipTuple6[A, B, C, D, E, F](p: (Rep[A], Rep[B], Rep[C], Rep[D], Rep[E], Rep[F])): Rep[(A, (B, (C, (D, (E, F)))))] =
Tuple(p._1, p._2, p._3, p._4, p._5, p._6)
implicit def zipTuple7[A, B, C, D, E, F, G](p: (Rep[A], Rep[B], Rep[C], Rep[D], Rep[E], Rep[F], Rep[G])): Rep[(A, (B, (C, (D, (E, (F, G))))))] =
Tuple(p._1, p._2, p._3, p._4, p._5, p._6, p._7)
implicit def zipTuple8[A, B, C, D, E, F, G, H](p: (Rep[A], Rep[B], Rep[C], Rep[D], Rep[E], Rep[F], Rep[G], Rep[H])): Rep[(A, (B, (C, (D, (E, (F, (G, H)))))))] =
Tuple(p._1, p._2, p._3, p._4, p._5, p._6, p._7, p._8)
implicit def zipTuple9[A, B, C, D, E, F, G, H, I](p: (Rep[A], Rep[B], Rep[C], Rep[D], Rep[E], Rep[F], Rep[G], Rep[H], Rep[I])): Rep[(A, (B, (C, (D, (E, (F, (G, (H, I))))))))] =
Tuple(p._1, p._2, p._3, p._4, p._5, p._6, p._7, p._8, p._9)
implicit def zipTuple16[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P](p: (Rep[A], Rep[B], Rep[C], Rep[D], Rep[E], Rep[F], Rep[G], Rep[H], Rep[I], Rep[J], Rep[K], Rep[L], Rep[M], Rep[N], Rep[O], Rep[P])): Rep[(A, (B, (C, (D, (E, (F, (G, (H, (I, (J, (K, (L, (M, (N, (O, P)))))))))))))))] =
Tuple(p._1, p._2, p._3, p._4, p._5, p._6, p._7, p._8, p._9, p._10, p._11, p._12, p._13, p._14, p._15, p._16)
object Tuple {
def apply[A, B](a: Rep[A], b: Rep[B]) = Pair(a, b)
def apply[A, B, C](a: Rep[A], b: Rep[B], c: Rep[C]): Rep[(A, (B, C))] =
Pair(a, Pair(b, c))
def apply[A, B, C, D](a: Rep[A], b: Rep[B], c: Rep[C], d: Rep[D]): Rep[(A, (B, (C, D)))] =
Pair(a, Pair(b, Pair(c, d)))
def apply[A, B, C, D, E](a: Rep[A], b: Rep[B], c: Rep[C], d: Rep[D], e: Rep[E]): Rep[(A, (B, (C, (D, E))))] =
Pair(a, Pair(b, Pair(c, Pair(d, e))))
def apply[A, B, C, D, E, F](a: Rep[A], b: Rep[B], c: Rep[C], d: Rep[D], e: Rep[E], f: Rep[F]): Rep[(A, (B, (C, (D, (E, F)))))] =
Pair(a, Pair(b, Pair(c, Pair(d, Pair(e, f)))))
def apply[A, B, C, D, E, F, G](a: Rep[A], b: Rep[B], c: Rep[C], d: Rep[D], e: Rep[E], f: Rep[F], g: Rep[G]): Rep[(A, (B, (C, (D, (E, (F, G))))))] =
Pair(a, Pair(b, Pair(c, Pair(d, Pair(e, Pair(f, g))))))
def apply[A, B, C, D, E, F, G, H](a: Rep[A], b: Rep[B], c: Rep[C], d: Rep[D], e: Rep[E], f: Rep[F], g: Rep[G], h: Rep[H]): Rep[(A, (B, (C, (D, (E, (F, (G, H)))))))] =
Pair(a, Pair(b, Pair(c, Pair(d, Pair(e, Pair(f, Pair(g, h)))))))
def apply[A, B, C, D, E, F, G, H, I](a: Rep[A], b: Rep[B], c: Rep[C], d: Rep[D], e: Rep[E], f: Rep[F], g: Rep[G], h: Rep[H], i: Rep[I]): Rep[(A, (B, (C, (D, (E, (F, (G, (H, I))))))))] =
Pair(a, Pair(b, Pair(c, Pair(d, Pair(e, Pair(f, Pair(g, Pair(h, i))))))))
def apply[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P](a: Rep[A], b: Rep[B], c: Rep[C], d: Rep[D], e: Rep[E], f: Rep[F], g: Rep[G], h: Rep[H], i: Rep[I], j: Rep[J], k: Rep[K], l: Rep[L], m: Rep[M], n: Rep[N], o: Rep[O], p: Rep[P]): Rep[(A, (B, (C, (D, (E, (F, (G, (H, (I, (J, (K, (L, (M, (N, (O, P)))))))))))))))] =
Pair(a, Pair(b, Pair(c, Pair(d, Pair(e, Pair(f, Pair(g, Pair(h, Pair(i, Pair(j, Pair(k, Pair(l, Pair(m, Pair(n, Pair(o, p)))))))))))))))
def unapply[A, B](p: Rep[(A, B)]) = Some((p._1, p._2))
def unapply[A, B, C](p: Rep[(A, (B, C))])(implicit o: Overloaded1) =
Some((p._1, p._2, p._3))
def unapply[A, B, C, D](p: Rep[(A, (B, (C, D)))])(implicit o: Overloaded2) =
Some((p._1, p._2, p._3, p._4))
def unapply[A, B, C, D, E](p: Rep[(A, (B, (C, (D, E))))])(implicit o: Overloaded3) =
Some((p._1, p._2, p._3, p._4, p._5))
def unapply[A, B, C, D, E, F](p: Rep[(A, (B, (C, (D, (E, F)))))])(implicit o: Overloaded4) =
Some((p._1, p._2, p._3, p._4, p._5, p._6))
def unapply[A, B, C, D, E, F, G](p: Rep[(A, (B, (C, (D, (E, (F, G))))))])(implicit o: Overloaded5) =
Some((p._1, p._2, p._3, p._4, p._5, p._6, p._7))
def unapply[A, B, C, D, E, F, G, H](p: Rep[(A, (B, (C, (D, (E, (F, (G, H)))))))])(implicit o1: Overloaded1, o2: Overloaded1) =
Some((p._1, p._2, p._3, p._4, p._5, p._6, p._7, p._8))
def unapply[A, B, C, D, E, F, G, H, I](p: Rep[(A, (B, (C, (D, (E, (F, (G, (H, I))))))))])(implicit o: Overloaded6) =
Some((p._1, p._2, p._3, p._4, p._5, p._6, p._7, p._8, p._9))
def unapply[A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P](p: Rep[(A, (B, (C, (D, (E, (F, (G, (H, (I, (J, (K, (L, (M, (N, (O, P)))))))))))))))])(implicit o: Overloaded7) =
Some((p._1, p._2, p._3, p._4, p._5, p._6, p._7, p._8, p._9, p._10, p._11, p._12, p._13, p._14, p._15, p._16))
}
}
trait TuplesStd extends Tuples { self: ScalanStd =>
def unzipPair[A, B](p: Rep[(A, B)]): (Rep[A], Rep[B]) = p
implicit def zipPair[A, B](p: (Rep[A], Rep[B])): Rep[(A, B)] = p
}
trait TuplesExp extends Tuples with BaseExp { self: ScalanExp =>
val tuplesCache = scala.collection.mutable.HashMap.empty[Rep[_], (Rep[_], Rep[_])]
def unzipPair[A, B](p: Rep[(A, B)]): (Rep[A], Rep[B]) = p match {
case Def(Tup(a, b)) => (a, b)
case _ => p.elem match {
case pe: PairElem[_, _] =>
implicit val eA = pe.eFst
implicit val eB = pe.eSnd
if (cachePairs) {
if (!tuplesCache.contains(p)) {
tuplesCache.update(p, (First(p), Second(p)))
}
tuplesCache(p).asInstanceOf[(Rep[A], Rep[B])]
}
else
(First(p), Second(p))
case _ =>
!!!(s"expected Tup[A,B] or Sym with type (A,B) but got ${p.toStringWithDefinition}", p)
}
}
implicit def zipPair[A, B](p: (Exp[A], Exp[B])): Rep[(A, B)] = {
implicit val ea = p._1.elem
implicit val eb = p._2.elem
Tup(p._1, p._2)
}
case class Tup[A, B](a: Exp[A], b: Exp[B]) extends Def[(A, B)] {
implicit val eA: Elem[A] = a.elem
implicit val eB: Elem[B] = b.elem
assert(null != eA && null != eB)
lazy val selfType = element[(A,B)]
}
case class First[A, B](pair: Exp[(A, B)]) extends Def[A] {
val selfType: Elem[A] = pair.elem.eFst
}
case class Second[A, B](pair: Exp[(A, B)]) extends Def[B] {
val selfType: Elem[B] = pair.elem.eSnd
}
object TupleProjection {
def apply[A,B](t: Exp[(A,B)], i: Int): ExpAny = i match {
case 1 => t._1
case 2 => t._2
}
def unapply(p: ExpAny): Option[Int] = p match {
case Def(First(_)) => Some(1)
case Def(Second(_)) => Some(2)
case _ => None
}
}
def projectionIndex(p: ExpAny): Int = p match {
case TupleProjection(i) => i
case _ => !!!("tuple projection expected", p)
}
override def rewriteDef[T](d: Def[T]) = d match {
case First(Def(Tup(a, b))) => a
case Second(Def(Tup(a, b))) => b
case Tup(Def(First(a)), Def(Second(b))) if a == b => a
case _ => super.rewriteDef(d)
}
}
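// Hypothetical usage sketch (not part of the original sources): inside any Scalan cake,
// the Pair extractor/constructor gives staged destructuring and construction of tuples.
trait TuplesUsageSketch extends Tuples { self: Scalan =>
  def swap[A, B](p: Rep[(A, B)]): Rep[(B, A)] = {
    val Pair(a, b) = p // staged destructuring via unzipPair
    Pair(b, a)         // staged construction via zipPair
  }
}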
|
scalan/scalan
|
core/src/main/scala/scalan/primitives/Tuples.scala
|
Scala
|
apache-2.0
| 13,400 |
package com.github.jlprat.gameserver.fsm.model
import akka.actor.ActorRef
import Card._
/**
* Helper class that holds an actor and its internal business ID
* @param actorRef the actorRef
* @param id the business Id of the Player
* TODO: refactor this id to its own object
*/
case class PlayerInfo(actorRef: ActorRef, id: Int)
/**
* Created by josep on 6/30/15.
*/
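/**
 * State data carried by the table FSM:
 *  - [[Initial]] holds the players present before the game starts
 *  - [[DefaultData]] tracks the active player, the remaining players, the deck, the discard
 *    pile and the play direction
 *  - [[WithPenaltyData]] additionally accumulates pending penalty cards
 *  - [[OverridenSuitData]] additionally carries an overriding suit
 */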
sealed trait TableData
case class Initial(activePlayers: Vector[PlayerInfo]) extends TableData
case class DefaultData(activePlayer: PlayerInfo,
remainingPlayers: Vector[PlayerInfo],
deck: Deck,
discardPile: DiscardPile,
clockwise: Boolean) extends TableData
case class WithPenaltyData(activePlayer: PlayerInfo,
remainingPlayers: Vector[PlayerInfo],
deck: Deck,
discardPile: DiscardPile,
clockwise: Boolean,
penaltyCards: Int) extends TableData
case class OverridenSuitData(activePlayer: PlayerInfo,
remainingPlayers: Vector[PlayerInfo],
deck: Deck,
discardPile: DiscardPile,
clockwise: Boolean,
suit: Suit) extends TableData
|
jlprat/akka-gameserver
|
src/main/scala/com/github/jlprat/gameserver/fsm/model/TableData.scala
|
Scala
|
apache-2.0
| 1,375 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.eventhubscommon.client
import java.net.SocketTimeoutException
import java.time.{Duration, Instant}
import scala.collection.mutable.ListBuffer
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.util.{Failure, Success}
import scala.xml.XML
import com.microsoft.azure.eventhubs.SharedAccessSignatureTokenProvider
import scalaj.http.{Http, HttpResponse}
import org.apache.spark.eventhubscommon.EventHubNameAndPartition
import org.apache.spark.internal.Logging
/**
 * A RESTful API based client for Event Hubs
*
* @param eventHubNamespace the namespace of eventhub
* @param numPartitionsEventHubs a map from eventHub name to the total number of partitions
* @param consumerGroups a map from eventHub name to consumer group names
* @param policyKeys a map from eventHub name to (policyName, policyKey) pair
* @param threadNum the number of threads used to communicate with remote EventHub
*/
private[spark] class RestfulEventHubClient(
eventHubNamespace: String,
numPartitionsEventHubs: Map[String, Int],
consumerGroups: Map[String, String],
policyKeys: Map[String, (String, String)],
threadNum: Int) extends EventHubClient with Logging {
private val RETRY_INTERVAL_SECONDS = Array(8, 16, 32, 64, 128)
// will be used to execute requests to EventHub
import org.apache.spark.eventhubscommon.Implicits.exec
private def createSasToken(eventHubName: String, policyName: String, policyKey: String):
String = {
    // The 10-minute token validity is hardcoded; this method is called every time a new
    // batch is started, so we may want to evaluate whether creating a new SAS token each
    // time has any negative impact.
SharedAccessSignatureTokenProvider.generateSharedAccessSignature(
s"$policyName", s"$policyKey",
s"$eventHubNamespace.servicebus.windows.net/$eventHubName",
Duration.ofMinutes(10))
}
private def fromResponseBodyToEndpoint(responseBody: String): (Long, Long) = {
val partitionDescription = XML.loadString(responseBody) \\ "entry" \
"content" \ "PartitionDescription"
((partitionDescription \ "LastEnqueuedOffset").text.toLong,
(partitionDescription \ "EndSequenceNumber").text.toLong)
}
private def fromParametersToURLString(eventHubName: String, partitionId: Int): String = {
s"https://$eventHubNamespace.servicebus.windows.net/$eventHubName" +
s"/consumergroups/${consumerGroups(eventHubName)}/partitions/$partitionId?api-version=2015-01"
}
private def fromResponseBodyToStartSeq(responseBody: String): Long = {
val partitionDescription = XML.loadString(responseBody) \\ "entry" \
"content" \ "PartitionDescription"
(partitionDescription \ "BeginSequenceNumber").text.toLong
}
private def aggregateResults[T](undergoingRequests: List[Future[(EventHubNameAndPartition, T)]]):
Option[Map[EventHubNameAndPartition, T]] = {
Await.ready(Future.sequence(undergoingRequests), 60 seconds).value.get match {
case Success(queryResponse) =>
Some(queryResponse.toMap.map {case (eventHubQueryKey, queryResponseString) =>
(eventHubQueryKey, queryResponseString.asInstanceOf[T])})
case Failure(e) =>
e.printStackTrace()
None
}
}
private def composeQuery[T](
retryIfFail: Boolean,
fromResponseBodyToResult: String => T,
nameAndPartition: EventHubNameAndPartition):
Future[(EventHubNameAndPartition, T)] = {
Future {
var retryTime = 0
var successfullyFetched = false
var response: HttpResponse[String] = null
val ehNameAndPartition = nameAndPartition
val eventHubName = nameAndPartition.eventHubName
val partitionId = nameAndPartition.partitionId
while (!successfullyFetched) {
logDebug(s"start fetching latest offset of $ehNameAndPartition")
val urlString = fromParametersToURLString(eventHubName, partitionId)
try {
response = Http(urlString).header("Authorization",
createSasToken(eventHubName,
policyName = policyKeys(eventHubName)._1,
policyKey = policyKeys(eventHubName)._2)).
header("Content-Type", "application/atom+xml;type=entry;charset=utf-8").
timeout(connTimeoutMs = 3000, readTimeoutMs = 30000).asString
if (response.code != 200) {
if (!retryIfFail || retryTime > RETRY_INTERVAL_SECONDS.length - 1) {
val errorInfoString = s"cannot get latest offset of" +
s" $ehNameAndPartition, status code: ${response.code}, ${response.headers}" +
s" returned error: ${response.body}"
logError(errorInfoString)
throw new Exception(errorInfoString)
} else {
val retryInterval = 1000 * RETRY_INTERVAL_SECONDS(retryTime)
logError(s"cannot get connect with Event Hubs Rest Endpoint for partition" +
s" $ehNameAndPartition, retry after $retryInterval seconds")
Thread.sleep(retryInterval)
retryTime += 1
}
} else {
successfullyFetched = true
}
} catch {
case e: SocketTimeoutException =>
e.printStackTrace()
logError("Event Hubs return ReadTimeout with 30s as threshold, retrying...")
case e: Exception =>
e.printStackTrace()
throw e
}
}
val results = fromResponseBodyToResult(response.body)
logDebug(s"results of $ehNameAndPartition: $results")
(ehNameAndPartition, results)
}
}
private def queryPartitionRuntimeInfo[T](
targetEventHubsNameAndPartitions: List[EventHubNameAndPartition],
fromResponseBodyToResult: String => T, retryIfFail: Boolean):
Option[Map[EventHubNameAndPartition, T]] = {
val futures = new ListBuffer[Future[(EventHubNameAndPartition, T)]]
if (targetEventHubsNameAndPartitions.isEmpty) {
for ((eventHubName, numPartitions) <- numPartitionsEventHubs;
partitionId <- 0 until numPartitions) {
futures += composeQuery(retryIfFail, fromResponseBodyToResult,
EventHubNameAndPartition(eventHubName, partitionId))
}
} else {
for (targetNameAndPartition <- targetEventHubsNameAndPartitions) {
futures += composeQuery(retryIfFail, fromResponseBodyToResult, targetNameAndPartition)
}
}
aggregateResults(futures.toList)
}
override def close(): Unit = {
// empty
}
/**
* return highest offset/seq and latest enqueueTime of each partition
*/
override def endPointOfPartition(
retryIfFail: Boolean,
targetEventHubsNameAndPartitions: List[EventHubNameAndPartition]):
Option[Map[EventHubNameAndPartition, (Long, Long)]] = {
queryPartitionRuntimeInfo(targetEventHubsNameAndPartitions,
fromResponseBodyToEndpoint, retryIfFail)
}
private def fromResponseBodyToEnqueueTime(responseBody: String): Long = {
val partitionDescription = XML.loadString(responseBody) \\ "entry" \
"content" \ "PartitionDescription"
Instant.parse((partitionDescription \ "LastEnqueuedTimeUtc").text).getEpochSecond
}
/**
* return the last enqueueTime of each partition
* @return a map from eventHubsNamePartition to EnqueueTime
*/
override def lastEnqueueTimeOfPartitions(
retryIfFail: Boolean,
targetEventHubNameAndPartitions: List[EventHubNameAndPartition]):
Option[Map[EventHubNameAndPartition, Long]] = {
queryPartitionRuntimeInfo(targetEventHubNameAndPartitions,
fromResponseBodyToEnqueueTime, retryIfFail)
}
/**
* return the start seq number of each partition
*
* @return a map from eventhubName-partition to seq
*/
override def startSeqOfPartition(
retryIfFail: Boolean,
targetEventHubNameAndPartitions: List[EventHubNameAndPartition]):
Option[Map[EventHubNameAndPartition, Long]] = {
queryPartitionRuntimeInfo(targetEventHubNameAndPartitions,
fromResponseBodyToStartSeq, retryIfFail)
}
}
private[spark] object RestfulEventHubClient {
def getInstance(eventHubNameSpace: String, eventhubsParams: Map[String, Map[String, String]]):
RestfulEventHubClient = {
new RestfulEventHubClient(eventHubNameSpace,
numPartitionsEventHubs = {
eventhubsParams.map { case (eventhubName, params) => (eventhubName,
params("eventhubs.partition.count").toInt)
}
},
consumerGroups = {
eventhubsParams.map { case (eventhubName, params) => (eventhubName,
params("eventhubs.consumergroup"))
}
},
policyKeys = eventhubsParams.map { case (eventhubName, params) => (eventhubName,
(params("eventhubs.policyname"), params("eventhubs.policykey")))
},
threadNum = 15)
}
}
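// Hypothetical usage sketch (not part of the original sources): builds a client from the
// per-event-hub parameter maps expected by getInstance (partition count, consumer group,
// policy name and policy key) and queries the highest offset and sequence number of every
// partition, without retrying on failure.
private[spark] object RestfulEventHubClientUsageSketch {
  def latestEndpoints(
      eventHubNamespace: String,
      eventhubsParams: Map[String, Map[String, String]]):
    Option[Map[EventHubNameAndPartition, (Long, Long)]] =
    RestfulEventHubClient.getInstance(eventHubNamespace, eventhubsParams)
      .endPointOfPartition(retryIfFail = false, targetEventHubsNameAndPartitions = Nil)
}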
|
CodingCat/spark-eventhubs
|
core/src/main/scala/org/apache/spark/eventhubscommon/client/RestfulEventHubClient.scala
|
Scala
|
apache-2.0
| 9,677 |
package at.ac.tuwien.ifs.ir.evaluation
import java.io.File
import java.nio.file.{Files, Paths}
import at.ac.tuwien.ifs.io.TXTFile
import at.ac.tuwien.ifs.ir.model._
import scala.sys.process._
import scala.util.Random
/**
* Created by aldo on 10/19/14.
*/
class TRECEval(tempDir: String = ".", rounding: Boolean = true) {
val temp: String = TRECEval.makeDir(tempDir)
//TRECEval.clearFolder(temp)
def avgInt(vs: Seq[Int]): Double =
vs.sum.toDouble / vs.size
def avg(vs: Seq[Double]): Double =
avg(vs, vs.size)
def avg(vs: Seq[Double], den: Int): Double =
vs.sum / den
def round(num: Double): Double = if (rounding) Math.round(num * 10000).toDouble / 10000 else num
def recall(run: Run, qRel: QRel): Double =
if (qRel.sizeRel == 0) {
0d
} else {
num_ret_rel(run, qRel).toDouble / qRel.sizeRel
}
def rbpw_p(p: Float, rank: Int): Double = (1d - p) * Math.pow(p, rank - 1)
def rbp_p(p: Float, run: Run, qRel: QRel): Double =
run.runRecords.map(rR => if (qRel.getRel(rR.document) > 0)
rbpw_p(p, rR.rank) else 0d).sum
def ap(run: Run, qRel: QRel): Double = {
if (qRel.sizeRel != 0)
run.runRecords.map(rR =>
if (qRel.getRel(rR.document) > 0) {
p_n(rR.rank, run, qRel)
} else 0d).sum / qRel.sizeRel
else
0d
}
def dcg(run: Run, qRel: QRel): Double =
run.runRecords.map(rR => if (qRel.getRel(rR.document) > 0)
qRel.getRel(rR.document).toDouble / Math.log(rR.rank + 1) * Math.log(2) else 0d).sum
def idcg(qRel: QRel): Double =
if (qRel.sizeRel != 0)
qRel.qrelRecords.filter(_.rel > 0).sortBy(-_.rel).zipWithIndex.map(di =>
di._1.rel.toDouble / Math.log(di._2 + 2) * Math.log(2)).sum
else
0d
def num_ret_rels(runs: Runs, qRels: QRels): List[Int] =
runs.runs.withFilter(r => r != null && qRels.topicIds.contains(r.id)).map(run =>
num_ret_rel(run, qRels.topicQRels(run.id)))
def num_ret_rel(runs: Runs, qRels: QRels): Int =
runs.runs.withFilter(r => r != null && qRels.topicIds.contains(r.id)).map(run =>
num_ret_rel(run, qRels.topicQRels(run.id))).sum
def recall(runs: Runs, qRels: QRels): List[Double] =
runs.runs.withFilter(r => r != null && qRels.topicIds.contains(r.id)).map(run =>
recall(run, qRels.topicQRels(run.id)))
def rbp_p(p: Float, runs: Runs, qRels: QRels): Double = avg(
runs.runs.withFilter(r => r != null && qRels.topicIds.contains(r.id)).map(run =>
rbp_p(p, run, qRels.topicQRels(run.id)))
)
def map(runs: Runs, qRels: QRels): Double = avg(
runs.runs.withFilter(r => r != null && qRels.topicIds.contains(r.id)).map(run => {
ap(run, qRels.topicQRels(run.id))
}))
def ndcg(runs: Runs, qRels: QRels): Double = avg(
runs.runs.withFilter(r => r != null && qRels.topicIds.contains(r.id)).map(run => {
val den = idcg(qRels.topicQRels(run.id))
if (den != 0)
dcg(run, qRels.topicQRels(run.id)) / den
else
0d
}))
def p(n: Int, run: Run, qRel: QRel): Double =
num_ret_rel(run, qRel).toDouble / n
def num_ret_rel(run: Run, qRel: QRel): Int =
run.runRecords.map(rR => if (qRel.getRel(rR.document) > 0) 1 else 0).sum
def cut(n: Int, run: Run): Run = new Run(run.id, run.runRecords.take(n))
def cut(n: Int, runs: Runs): Runs =
new Runs(runs.id, runs.runs.withFilter(_ != null).map(run => cut(n, run)))
def p_n(n: Int, run: Run, qRel: QRel): Double =
num_ret_rel(cut(n, run), qRel).toDouble / n
def p_n(n: Int, runs: Runs, qRels: QRels): Double =
avgInt(
num_ret_rels(cut(n, runs), qRels)
) / n
def recall_n(n: Int, runs: Runs, qRels: QRels): Double =
avg(
recall(cut(n, runs), qRels) //, qRels.sizeTopics
)
def computeMetricPerTopic(metric: String, runs: Runs, qRels: QRels): Map[Int, Double] =
qRels.qRels.map(qRel => (qRel.id -> computeMetric(metric, runs, qRels.getTopicQRels(qRel.id)))).toMap
def computeRawMetricPerTopic(metric: String, runs: Runs, qRels: QRels): Map[Int, Double] =
qRels.qRels.map(qRel => (qRel.id -> computeRawMetric(metric, runs, qRels.getTopicQRels(qRel.id)))).toMap
def computeRawMetric(metric: String, runs: Runs, qRels: QRels): Double = {
if (metric.startsWith("P_")) {
val n = metric.split("_").last.toInt
p_n(n, runs, qRels)
} else if (metric.startsWith("AP_")) {
val n = metric.split("_").last.toInt
p_n(n, runs, qRels.inverse)
} else if (metric.startsWith("recall_")) {
val n = metric.split("_").last.toInt
recall_n(n, runs, qRels)
} else if (metric.startsWith("RBP_")) {
val p = metric.split("_").last.toFloat
rbp_p(p, runs, qRels)
} else if (metric.startsWith("map")) {
map(runs, qRels)
} else if (metric.startsWith("ndcg")) {
ndcg(runs, qRels)
} else if (metric.startsWith("num_ret_rel")) {
num_ret_rel(runs, qRels)
} else
computeUnsupportedMetric(metric: String, runs: Runs, qRels: QRels)
}
def computeMetric(metric: String, runs: Runs, qRels: QRels): Double = {
if (metric.startsWith("P_")) {
val n = metric.split("_").last.toInt
round(p_n(n, runs, qRels))
} else if (metric.startsWith("AP_")) {
val n = metric.split("_").last.toInt
round(p_n(n, runs, qRels.inverse))
} else if (metric.startsWith("recall_")) {
val n = metric.split("_").last.toInt
round(recall_n(n, runs, qRels))
} else if (metric.startsWith("RBP_")) {
val p = metric.split("_").last.toFloat
round(rbp_p(p, runs, qRels))
} else if (metric.startsWith("map")) {
round(map(runs, qRels))
} else if (metric.startsWith("ndcg")) {
round(ndcg(runs, qRels))
} else if (metric.startsWith("num_ret_rel")) {
num_ret_rel(runs, qRels)
} else
computeUnsupportedMetric(metric: String, runs: Runs, qRels: QRels)
}
def computeUnsupportedMetricPerTopic(metric: String, runs: Runs, qRels: QRels) = {
qRels.qRels.map(qRel => (qRel.id -> {
computeUnsupportedMetric(metric, runs, qRels.getTopicQRels(qRel.id))
})).toMap
}
def computeUnsupportedMetric(metric: String, runs: Runs, qRels: QRels): Double = {
def getRandomString: String = {
val rS = TRECEval.getRandomString
if ((new File(tempDir, "runs." + rS)).exists) getRandomString else rS
}
val rS = getRandomString
val runsP = new File(tempDir, "runs." + rS).getCanonicalPath
val qRelsP = new File(tempDir, "qRels." + rS).getCanonicalPath
TXTFile.writeFile(runsP, runs.toString)
TXTFile.writeFile(qRelsP, qRels.toString)
try {
val value = TRECEval.computeMetric(metric, runsP, qRelsP)
TRECEval.deleteFile(runsP)
TRECEval.deleteFile(qRelsP)
value
} catch {
case e: Exception => {
println(metric)
println(runsP, runs.toString.length)
println(qRelsP, qRels.toString.length)
println(s"trec_eval $qRelsP $runsP" !!)
//println((s"trec_eval $qRelsP $runsP" #| s"grep ^$metric\\s" !!).split("\t").last)
throw e
}
}
}
def computeUnsupportedMetric(metric: String, runs: String, qRels: String): Double = {
TRECEval.computeMetric(metric, runs, qRels)
}
def computeAntiMetric(metric: String, runs: Runs, qRels: QRels) = computeMetric(metric, runs, qRels.inverse)
def computeRawAntiMetric(metric: String, runs: Runs, qRels: QRels) = computeRawMetric(metric, runs, qRels.inverse)
def computeAntiMetricPerTopic(metric: String, runs: Runs, qRels: QRels) = computeMetricPerTopic(metric, runs, qRels.inverse)
def computeRawAntiMetricPerTopic(metric: String, runs: Runs, qRels: QRels) = computeRawMetricPerTopic(metric, runs, qRels.inverse)
def computeMAP(runs: Runs, qRels: QRels) = computeMetric("map", runs, qRels)
def computeAntiMAP(runs: Runs, qRels: QRels) = computeMetric("map", runs, qRels.inverse)
def computeNDCG(runs: Runs, qRels: QRels) = computeMetric("ndcg", runs, qRels)
def computeP5(runs: Runs, qRels: QRels) = computeMetric("P_5", runs, qRels)
def computeRecall5(runs: Runs, qRels: QRels) = computeMetric("recall_5", runs, qRels)
def computeP10(runs: Runs, qRels: QRels) = computeMetric("P_10", runs, qRels)
def computeRecall10(runs: Runs, qRels: QRels) = computeMetric("recall_10", runs, qRels)
def computeP15(runs: Runs, qRels: QRels) = computeMetric("P_15", runs, qRels)
def computeRecall15(runs: Runs, qRels: QRels) = computeMetric("recall_15", runs, qRels)
def computeP20(runs: Runs, qRels: QRels) = computeMetric("P_20", runs, qRels)
def computeRecall20(runs: Runs, qRels: QRels) = computeMetric("recall_20", runs, qRels)
def computeP30(runs: Runs, qRels: QRels) = computeMetric("P_30", runs, qRels)
def computeRecall30(runs: Runs, qRels: QRels) = computeMetric("recall_30", runs, qRels)
def computeP100(runs: Runs, qRels: QRels) = computeMetric("P_100", runs, qRels)
def computeRecall100(runs: Runs, qRels: QRels) = computeMetric("recall_100", runs, qRels)
def computeNumRel(runs: Runs, qRels: QRels) = computeMetric("num_rel", runs, qRels)
def computeNumRet(runs: Runs, qRels: QRels) = computeMetric("num_ret", runs, qRels)
def computeAntiP5(runs: Runs, qRels: QRels) = computeMetric("P_5", runs, qRels.inverse)
def computeAntiP10(runs: Runs, qRels: QRels) = computeMetric("P_10", runs, qRels.inverse)
def computeAntiP15(runs: Runs, qRels: QRels) = computeMetric("P_15", runs, qRels.inverse)
def computeAntiP20(runs: Runs, qRels: QRels) = computeMetric("P_20", runs, qRels.inverse)
def computeAntiP30(runs: Runs, qRels: QRels) = computeMetric("P_30", runs, qRels.inverse)
def computeAntiP100(runs: Runs, qRels: QRels) = computeMetric("P_100", runs, qRels.inverse)
def computeAntiNumRel(runs: Runs, qRels: QRels) = computeMetric("num_rel", runs, qRels.inverse)
def getScores(qRels: QRels, runs: List[Runs], metric: String): List[Score] =
runs.map(run => {
val score = new Score(run.id,
this.computeMetric(metric, run, qRels),
metric, qRels)
score
})
}
object TRECEval {
def apply(tempDir: String = ".") = new TRECEval(tempDir)
def computeMetric(metric: String, runs: String, qRels: String): Double = {
val jm = metric.split("_").head
//println(s"trec_eval -m $jm $qRels $runs"!)
(s"trec_eval -m $jm $qRels $runs" #| s"grep ^$metric\\s" !!).trim.split("\t").last.toDouble
}
def makeDir(name: String) = {
val dirF = new File(name)
if (!dirF.exists) dirF.mkdir
dirF.getName
}
def deleteFile(path: String) =
Files.delete(Paths.get(path))
def clearFolder(name: String) = {
val listFiles = (new File(name)).listFiles
if (listFiles != null) listFiles.map(_.delete)
}
private def getRandomString = 1 to 10 map (i => Random.nextInt(10)) mkString ("")
}
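// Hypothetical usage sketch (not part of the original sources): given already parsed Runs
// and QRels, supported metrics such as P_10 or map are computed in pure Scala; any other
// metric falls back to shelling out to the trec_eval binary, which must then be on the PATH.
object TRECEvalUsageSketch {
  def precisionAt10(runs: Runs, qRels: QRels): Double =
    TRECEval().computeMetric("P_10", runs, qRels)
}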
|
aldolipani/PoolBiasEstimators
|
src/main/scala/at/ac/tuwien/ifs/ir/evaluation/TRECEval.scala
|
Scala
|
apache-2.0
| 10,814 |
/*
* Author: Manish Gupta
*/
package com.guptam.spark.dba.hdfsutil
import java.io.BufferedInputStream
import java.io.FileInputStream
import java.io.InputStream
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{ Path, FileSystem, FileStatus }
import scala.collection.mutable.ListBuffer
import com.guptam.spark.dba.common._
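/**
 * Thin convenience wrapper around the Hadoop FileSystem API. If a configuration directory is
 * given, core-site.xml and hdfs-site.xml are loaded from it; otherwise the default Hadoop
 * configuration is used.
 */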
class HadoopFileSystemOperation(sPathToConfig: String) extends HDFSStringUtil {
// Create a new Hadoop Conf and get a FileSystem object
val conf: Configuration = new Configuration()
if (!sPathToConfig.isEmpty()) {
conf.addResource(new Path(removeLastSlash(sPathToConfig) + "/core-site.xml"))
conf.addResource(new Path(removeLastSlash(sPathToConfig) + "/hdfs-site.xml"))
}
// File system object
val fileSystem = FileSystem.get(conf)
  // Write a list of strings to a file on HDFS
def createAndWriteFileFromListBuffer(
fileName: String,
data: List[String],
overwrite: Boolean): Unit = {
val os = fileSystem.create(getPath(fileName), overwrite)
os.writeBytes(data.mkString("\n"))
os.close()
}
  // rename a directory
def renameDirectory(from: String, to: String): Boolean = {
if (isValidDirectory(from)) {
      fileSystem.rename(getPath(from), getPath(to)) // returns true if successful
} else {
false
}
}
  // Get listing of all files and statistics, skipping files whose names start with '.' or '_'
def getFileStatisticsExcludingDotUnderscore(location: String): Option[Array[FileStatus]] = {
if (isValidDirectory(location)) {
Some(fileSystem.listStatus(getPath(location)).filter(fileStatus =>
!(fileStatus.getPath().getName().startsWith(Constants.UNDERSCORE)
|| fileStatus.getPath().getName().startsWith(Constants.DOT))))
} else {
None
}
}
// Get listing of all files and statistics
def getFileStatistics(location: String): Option[Array[FileStatus]] = {
if (isValidDirectory(location)) {
Some(fileSystem.listStatus(getPath(location)))
} else {
None
}
}
  // Get the filesystem's working directory
def getWorkingDirectory(): String = {
removeLastSlash(fileSystem.getWorkingDirectory().toString())
}
// compare 2 File Statuses
  def equalsFileStatistics(fs1: Array[FileStatus], fs2: Array[FileStatus]): Boolean =
    fs1.sameElements(fs2)
// This is extremely slow
  def listAllFiles(location: String, recursive: Boolean): List[String] = {
    val lst = fileSystem.listFiles(
      getPath(location), recursive)
val result = new ListBuffer[String]()
while (lst.hasNext()) {
result += lst.next().getPath().toString()
}
result.toList
}
// Copy a local file to HDFS
def copyFromLocal(srcFile: String, targetFolder: String, overwriteTarget: Boolean): Unit = {
val sourcePath = getPath(srcFile)
val targetFolderPath = getPath(targetFolder)
if (fileSystem.isFile(sourcePath) && fileSystem.isDirectory(targetFolderPath)) {
fileSystem.copyFromLocalFile(false, overwriteTarget, sourcePath, targetFolderPath)
}
}
// delete a single file from hdfs location
def delete(filename: String): Unit = {
val path = getPath(filename)
if (fileSystem.isFile(path)) {
fileSystem.delete(path, false)
}
}
// Get File from Hadoop into an input Stream
def getFile(filename: String): InputStream = {
val path = getPath(filename)
fileSystem.open(path)
}
// get Block Size
def getOutputBlockSize(outputPath: Path): Long = {
fileSystem.getDefaultBlockSize(outputPath)
}
// get Block Size (String)
def getOutputBlockSize(outputPath: String): Long = {
fileSystem.getDefaultBlockSize(getPath(outputPath))
}
// if the path is a valid file
def isValidFile(location: String): Boolean = {
val path = getPath(location)
if (fileSystem.isFile(path)) {
true
} else {
false
}
}
  // if the path is a valid directory
def isValidDirectory(location: String): Boolean = {
val path = getPath(location)
if (fileSystem.isDirectory(path)) {
true
} else {
false
}
}
// if the path is already existing as file or folder
def pathExists(location: String): Boolean = {
if (fileSystem.isFile(getPath(location)) || fileSystem.isDirectory(getPath(location))) {
true
} else {
false
}
}
// If the File is an Ignore File
def isIgnoreFile(location: Path): Boolean = {
val strLocation = location.getName().toLowerCase()
if (strLocation.startsWith(Constants.DOT) || strLocation.startsWith(Constants.UNDERSCORE)) {
true
} else {
false
}
}
  // get the child folder's path relative to the parent folder
def getPartitionPath(mainFolder: String, childFolder: String): String = {
getResolvedPath(childFolder).toString().replace(getResolvedPath(mainFolder).toString(), "")
}
// get fully qualified path from string
// This will work only on existing folders
def getResolvedPath(location: String): Path = {
fileSystem.resolvePath(new Path(location))
}
// get path from string
def getPath(location: String): Path = {
new Path(location)
}
// Create a new folder
def createFolder(folderPath: String): Unit = {
val path = getPath(folderPath)
if (!fileSystem.exists(path)) {
fileSystem.mkdirs(path)
}
}
// Delete a folder
def deleteFolder(folderPath: String, forced: Boolean): Unit = {
val path = getPath(folderPath)
if (fileSystem.exists(path) && fileSystem.isDirectory(path)) {
      fileSystem.delete(path, forced) // recursive delete can remove an entire directory tree, use with care
}
}
}
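// Hedged usage sketch (not part of the original source): illustrates the intended call pattern
// of the wrapper above. The config directory and HDFS paths below are placeholder values.
object HadoopFileSystemOperationUsageSketch {
  def example(): Unit = {
    val hdfs = new HadoopFileSystemOperation("/etc/hadoop/conf")
    if (!hdfs.pathExists("/tmp/demo")) hdfs.createFolder("/tmp/demo")
    hdfs.createAndWriteFileFromListBuffer("/tmp/demo/part-00000", List("line1", "line2"), overwrite = true)
    hdfs.getFileStatistics("/tmp/demo").foreach(_.foreach(status => println(status.getPath)))
  }
}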
|
guptam/spark-dba
|
src/com/guptam/spark/dba/hdfsutil/HadoopFileSystemOperation.scala
|
Scala
|
apache-2.0
| 5,791 |
package scalaxy.compilets; package test
import org.junit._
import Assert._
import scalaxy.compilets.fail
import scala.reflect.runtime.universe._
import scala.reflect.ClassTag
class TestMacros
{
/*
@Test
def testReplaceTypeVar {
def rep[T](v: T) = replace(v.toString, "?")
println("rep(12) = " + rep(12))
}
*/
import math.Numeric.Implicits._
import Ordering.Implicits._
def plus[T : TypeTag : Numeric](a: T, b: T) = replace(
a + b, // Numeric.Implicits.infixNumericOps[T](a)(n).+(b)
implicitly[Numeric[T]].plus(a, b)
)
@Test
def testReplace {
replace(1, 1) match {
case Replacement(
Expr(Literal(Constant(1))),
Expr(Literal(Constant(1)))
) =>
}
}
@Test
def testFail {
fail("hehe") { 1 } match {
case MatchError(
Expr(Literal(Constant(1))),
"hehe"
) =>
}
}
@Test
def testWarn {
warn("hehe") { 1 } match {
case MatchWarning(
Expr(Literal(Constant(1))),
"hehe"
) =>
}
}
@Test
def testWarning {
warning[Unit]("hehe") match {
case Warning("hehe") =>
}
}
@Test
def testError {
error[Unit]("hehe") match {
case Error("hehe") =>
}
}
@Test
def testReplacement {
replacement(1) match {
case ReplaceBy(Expr(Literal(Constant(1)))) =>
case v =>
assertTrue("got " + v, false)
}
}
}
|
nativelibs4java/Scalaxy
|
Obsolete/Compilets/API/src/test/scala/scalaxy/TestMacros.scala
|
Scala
|
bsd-3-clause
| 1,408 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.junit
import org.scalatest._
import collection.immutable.TreeSet
import helpers._
import org.scalatest.events._
class JUnit3SuiteSpec extends FunSpec with SharedHelpers {
describe("A JUnit3Suite") {
it("should return the test names in alphabetical order from testNames") {
val a = new JUnit3Suite {
def testThis() {}
def testThat() {}
}
expect(List("testThat", "testThis")) {
a.testNames.iterator.toList
}
val b = new JUnit3Suite {}
expect(List[String]()) {
b.testNames.iterator.toList
}
val c = new JUnit3Suite {
def testThat() {}
def testThis() {}
}
expect(List("testThat", "testThis")) {
c.testNames.iterator.toList
}
}
it("should return the proper testNames for test methods whether or not they take an Informer") {
val a = new JUnit3Suite {
def testThis() = ()
def testThat(info: Informer) = ()
}
assert(a.testNames === TreeSet("testThis"))
val b = new JUnit3Suite {}
assert(b.testNames === TreeSet[String]())
}
it("should not return names of methods that start with test, take no params, but have a return type " +
"other than Unit from testNames") {
val a = new TestWithNonUnitMethod
assert(a.testNames === TreeSet("testThat", "testThis"))
}
it("should include in testNames a method simply named 'test', that takes no params and has a return type " +
"of Unit") {
val a = new TestWithMethodNamedTest
assert(a.testNames === TreeSet("test", "testThat", "testThis"))
}
it("should return an empty tags map from the tags method, because a tag-like concept isn't supported in JUnit 3") {
val a = new JUnit3Suite {
@Ignore
def testThis() = ()
def testThat(info: Informer) = ()
}
assert(a.testTags.isEmpty)
val b = new JUnit3Suite {
def testThis() = ()
@Ignore
def testThat(info: Informer) = ()
}
assert(b.testTags.isEmpty)
val c = new JUnit3Suite {
@Ignore
def testThis() = ()
@Ignore
def testThat(info: Informer) = ()
}
assert(c.testTags.isEmpty)
val d = new JUnit3Suite {
@SlowAsMolasses
def testThis() = ()
@SlowAsMolasses
@Ignore
def testThat(info: Informer) = ()
}
assert(d.testTags.isEmpty)
val e = new JUnit3Suite {}
assert(e.testTags.isEmpty)
}
it("should execute all tests when run is called with testName None") {
TestWasCalledSuite.reinitialize()
val b = new TestWasCalledSuite
b.run(None, SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(TestWasCalledSuite.theTestThisCalled)
assert(TestWasCalledSuite.theTestThatCalled)
}
it("should execute one test when run is called with a defined testName") {
TestWasCalledSuite.reinitialize()
val a = new TestWasCalledSuite
a.run(Some("testThis"), SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(TestWasCalledSuite.theTestThisCalled)
assert(!TestWasCalledSuite.theTestThatCalled)
}
it("should throw IllegalArgumentException if run is passed a testName that does not exist") {
val a = new TestWasCalledSuite
intercept[IllegalArgumentException] {
// Here, they forgot that the name is actually testThis(Fixture)
a.run(Some("misspelled"), SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker)
}
}
it("should run no tests if tags to include is non-empty") {
TestWasCalledSuite.reinitialize()
val a = new TestWasCalledSuite
a.run(None, SilentReporter, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), Map(), None, new Tracker)
assert(!TestWasCalledSuite.theTestThisCalled)
assert(!TestWasCalledSuite.theTestThatCalled)
}
it("should return the correct test count from its expectedTestCount method") {
val a = new ASuite
assert(a.expectedTestCount(Filter()) === 1)
val b = new BSuite
assert(b.expectedTestCount(Filter()) === 1)
val c = new CSuite
assert(c.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 0)
assert(c.expectedTestCount(Filter(None, Set("org.scalatest.FastAsLight"))) === 1)
val d = new DSuite
assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 0)
assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 0)
assert(d.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 4)
assert(d.expectedTestCount(Filter()) === 4)
val e = new ESuite
assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 0)
assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 0)
assert(e.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 1)
assert(e.expectedTestCount(Filter()) === 1)
}
it("should generate a test failure if a Throwable, or an Error other than direct Error subtypes " +
"known in JDK 1.5, excluding AssertionError") {
val a = new ShouldFailSuite
val rep = new EventRecordingReporter
a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
val tf = rep.testFailedEventsReceived
assert(tf.size === 3)
}
}
}
|
epishkin/scalatest-google-code
|
src/test/scala/org/scalatest/junit/JUnit3SuiteSpec.scala
|
Scala
|
apache-2.0
| 6,275 |
package com.github.golem.command.administrative
import com.github.golem.command.CommandResponse
object GetVersion extends AdministrativeCommand {
case class Response(version: String) extends CommandResponse
}
|
pjarosik/golem
|
src/main/scala/com/github/golem/command/administrative/GetVersion.scala
|
Scala
|
gpl-2.0
| 214 |
package fpinscala.parallelism
import java.util.concurrent._
import language.implicitConversions
object Par {
type Par[A] = ExecutorService => Future[A]
def run[A](s: ExecutorService)(a: Par[A]): Future[A] = a(s)
def unit[A](a: A): Par[A] = (es: ExecutorService) => UnitFuture(a) // `unit` is represented as a function that returns a `UnitFuture`, which is a simple implementation of `Future` that just wraps a constant value. It doesn't use the `ExecutorService` at all. It's always done and can't be cancelled. Its `get` method simply returns the value that we gave it.
private case class UnitFuture[A](get: A) extends Future[A] {
def isDone = true
def get(timeout: Long, units: TimeUnit) = get
def isCancelled = false
def cancel(evenIfRunning: Boolean): Boolean = false
}
def map2[A,B,C](a: Par[A], b: Par[B])(f: (A,B) => C): Par[C] = // `map2` doesn't evaluate the call to `f` in a separate logical thread, in accord with our design choice of having `fork` be the sole function in the API for controlling parallelism. We can always do `fork(map2(a,b)(f))` if we want the evaluation of `f` to occur in a separate thread.
(es: ExecutorService) => {
val af = a(es)
val bf = b(es)
UnitFuture(f(af.get, bf.get)) // This implementation of `map2` does _not_ respect timeouts, and eagerly waits for the returned futures. This means that even if you have passed in "forked" arguments, using this map2 on them will make them wait. It simply passes the `ExecutorService` on to both `Par` values, waits for the results of the Futures `af` and `bf`, applies `f` to them, and wraps them in a `UnitFuture`. In order to respect timeouts, we'd need a new `Future` implementation that records the amount of time spent evaluating `af`, then subtracts that time from the available time allocated for evaluating `bf`.
}
def fork[A](a: => Par[A]): Par[A] = // This is the simplest and most natural implementation of `fork`, but there are some problems with it--for one, the outer `Callable` will block waiting for the "inner" task to complete. Since this blocking occupies a thread in our thread pool, or whatever resource backs the `ExecutorService`, this implies that we're losing out on some potential parallelism. Essentially, we're using two threads when one should suffice. This is a symptom of a more serious problem with the implementation, and we will discuss this later in the chapter.
es => es.submit(new Callable[A] {
def call = a(es).get
})
// Exercises 7.3: map2 that respects the contract of timeouts on Future.
def map2WithTimeOut[A, B, C](a: Par[A], b: Par[B])(f: (A, B) => C): Par[C] = ???
// Exercise 7.4: asyncF.
def asyncF[A,B](f: A => B): A => Par[B] = ???
// Exercise 7.5: sequence.
def sequence[A](as: List[Par[A]]): Par[List[A]] = ???
// Exercise 7.6: parFilter.
def parFilter[A](l: List[A])(f: A => Boolean): Par[List[A]] = ???
// Exercise 7.11: choiceN.
def choiceN[A](n: Par[Int])(choices: List[Par[A]]): Par[A] = ???
// Exercise 7.11: choice and terms of choiceN.
def choiceViaChoiceN[A](cond: Par[Boolean])(t: Par[A], f: Par[A]): Par[A] = ???
// Exercise 7.13: chooser.
def chooser[A,B](p: Par[A])(choices: A => Par[B]): Par[B] = ???
def choiceNViaChooser[A](n: Par[Int])(choices: List[Par[A]]): Par[A] = ???
def choiceViaChooser[A](a: Par[Boolean])(ifTrue: Par[A], ifFalse: Par[A]): Par[A] = ???
// Exercise 7.14: join.
def join[A](a: Par[Par[A]]): Par[A] = ???
// Exercise 7.14: joinViaFlatMap.
def joinViaFlatMap[A](a: Par[Par[A]]): Par[A] = ???
// Exercise 7.14: flatMapViaJoin.
def flatMapViaJoin[A,B](p: Par[A])(f: A => Par[B]): Par[B] = ???
def map[A,B](pa: Par[A])(f: A => B): Par[B] =
map2(pa, unit(()))((a,_) => f(a))
def sortPar(parList: Par[List[Int]]) = map(parList)(_.sorted)
def equal[A](e: ExecutorService)(p: Par[A], p2: Par[A]): Boolean =
p(e).get == p2(e).get
def delay[A](fa: => Par[A]): Par[A] =
es => fa(es)
/* Gives us infix syntax for `Par`. */
implicit def toParOps[A](p: Par[A]): ParOps[A] = new ParOps(p)
class ParOps[A](p: Par[A]) {
}
}
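// Hedged sketch (not part of the original exercises file): one possible way the `asyncF` and
// `sequence` exercises above could be written in terms of `unit`, `fork` and `map2` alone.
object ParExerciseSketches {
  import Par._
  // lift an ordinary function into one whose result is evaluated asynchronously
  def asyncFSketch[A, B](f: A => B): A => Par[B] =
    a => fork(unit(f(a)))
  // combine a list of parallel computations into one computation producing a list
  def sequenceSketch[A](as: List[Par[A]]): Par[List[A]] =
    as.foldRight(unit(List.empty[A]))((pa, acc) => map2(pa, acc)(_ :: _))
}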
object Examples {
import Par._
def sum(ints: IndexedSeq[Int]): Int = // `IndexedSeq` is a superclass of random-access sequences like `Vector` in the standard library. Unlike lists, these sequences provide an efficient `splitAt` method for dividing them into two parts at a particular index.
if (ints.size <= 1)
ints.headOption getOrElse 0 // `headOption` is a method defined on all collections in Scala. We saw this function in chapter 3.
else {
val (l,r) = ints.splitAt(ints.length/2) // Divide the sequence in half using the `splitAt` function.
sum(l) + sum(r) // Recursively sum both halves and add the results together.
}
}
|
RheinMainScala/fpinscala
|
exercises/src/main/scala/fpinscala/parallelism/Par.scala
|
Scala
|
mit
| 4,855 |
package models.db
import java.sql.Timestamp
import models.OAuthClient
import services.DatabaseService
trait OAuthClientsTable extends AccountsTable {
protected val databaseService: DatabaseService
import databaseService.driver.api._
class OauthClients(tag: Tag) extends Table[OAuthClient](tag, "oauth_clients") {
def id = column[Option[Long]]("id", O.PrimaryKey, O.AutoInc)
def ownerId = column[Long]("owner_id")
def grantType = column[String]("grant_type")
def clientId = column[String]("client_id")
def clientSecret = column[String]("client_secret")
def redirectUri = column[Option[String]]("redirect_uri")
def createdAt = column[Timestamp]("createdAt")
def * = (id, ownerId, grantType, clientId, clientSecret, redirectUri, createdAt) <> (OAuthClient.tupled, OAuthClient.unapply)
//OauthClient - Account FK is defined in sql script and will be executed by FlyWay migration
}
protected val oauthClients = TableQuery[OauthClients]
}
|
ziyasal/Reserveon
|
src/main/scala/models/db/OAuthClientsTable.scala
|
Scala
|
mit
| 993 |
object Test {
type OrAlias = Int | Float
def m(s: OrAlias | String) = s match {
case _: Int => ; case _: Float => ; case _: String => ; }
}
|
som-snytt/dotty
|
tests/patmat/i2254.scala
|
Scala
|
apache-2.0
| 148 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import java.sql.Timestamp
import org.apache.hadoop.hive.serde2.`lazy`.LazySimpleSerDe
import org.scalatest.Assertions._
import org.scalatest.BeforeAndAfterEach
import org.scalatest.exceptions.TestFailedException
import org.apache.spark.{SparkException, TaskContext, TestUtils}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Column
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.catalyst.plans.physical.Partitioning
import org.apache.spark.sql.execution.{SparkPlan, SparkPlanTest, UnaryExecNode}
import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.test.SQLTestUtils
import org.apache.spark.sql.types.StringType
class ScriptTransformationSuite extends SparkPlanTest with SQLTestUtils with TestHiveSingleton
with BeforeAndAfterEach {
import spark.implicits._
private val noSerdeIOSchema = HiveScriptIOSchema(
inputRowFormat = Seq.empty,
outputRowFormat = Seq.empty,
inputSerdeClass = None,
outputSerdeClass = None,
inputSerdeProps = Seq.empty,
outputSerdeProps = Seq.empty,
recordReaderClass = None,
recordWriterClass = None,
schemaLess = false
)
private val serdeIOSchema = noSerdeIOSchema.copy(
inputSerdeClass = Some(classOf[LazySimpleSerDe].getCanonicalName),
outputSerdeClass = Some(classOf[LazySimpleSerDe].getCanonicalName)
)
private var defaultUncaughtExceptionHandler: Thread.UncaughtExceptionHandler = _
private val uncaughtExceptionHandler = new TestUncaughtExceptionHandler
protected override def beforeAll(): Unit = {
super.beforeAll()
defaultUncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler
Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler)
}
protected override def afterAll(): Unit = {
super.afterAll()
Thread.setDefaultUncaughtExceptionHandler(defaultUncaughtExceptionHandler)
}
override protected def afterEach(): Unit = {
super.afterEach()
uncaughtExceptionHandler.cleanStatus()
}
test("cat without SerDe") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val rowsDf = Seq("a", "b", "c").map(Tuple1.apply).toDF("a")
checkAnswer(
rowsDf,
(child: SparkPlan) => new ScriptTransformationExec(
input = Seq(rowsDf.col("a").expr),
script = "cat",
output = Seq(AttributeReference("a", StringType)()),
child = child,
ioschema = noSerdeIOSchema
),
rowsDf.collect())
assert(uncaughtExceptionHandler.exception.isEmpty)
}
test("cat with LazySimpleSerDe") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val rowsDf = Seq("a", "b", "c").map(Tuple1.apply).toDF("a")
checkAnswer(
rowsDf,
(child: SparkPlan) => new ScriptTransformationExec(
input = Seq(rowsDf.col("a").expr),
script = "cat",
output = Seq(AttributeReference("a", StringType)()),
child = child,
ioschema = serdeIOSchema
),
rowsDf.collect())
assert(uncaughtExceptionHandler.exception.isEmpty)
}
test("script transformation should not swallow errors from upstream operators (no serde)") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val rowsDf = Seq("a", "b", "c").map(Tuple1.apply).toDF("a")
val e = intercept[TestFailedException] {
checkAnswer(
rowsDf,
(child: SparkPlan) => new ScriptTransformationExec(
input = Seq(rowsDf.col("a").expr),
script = "cat",
output = Seq(AttributeReference("a", StringType)()),
child = ExceptionInjectingOperator(child),
ioschema = noSerdeIOSchema
),
rowsDf.collect())
}
assert(e.getMessage().contains("intentional exception"))
    // Before SPARK-25158, uncaughtExceptionHandler would catch IllegalArgumentException
assert(uncaughtExceptionHandler.exception.isEmpty)
}
test("script transformation should not swallow errors from upstream operators (with serde)") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val rowsDf = Seq("a", "b", "c").map(Tuple1.apply).toDF("a")
val e = intercept[TestFailedException] {
checkAnswer(
rowsDf,
(child: SparkPlan) => new ScriptTransformationExec(
input = Seq(rowsDf.col("a").expr),
script = "cat",
output = Seq(AttributeReference("a", StringType)()),
child = ExceptionInjectingOperator(child),
ioschema = serdeIOSchema
),
rowsDf.collect())
}
assert(e.getMessage().contains("intentional exception"))
    // Before SPARK-25158, uncaughtExceptionHandler would catch IllegalArgumentException
assert(uncaughtExceptionHandler.exception.isEmpty)
}
test("SPARK-14400 script transformation should fail for bad script command") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val rowsDf = Seq("a", "b", "c").map(Tuple1.apply).toDF("a")
val e = intercept[SparkException] {
val plan =
new ScriptTransformationExec(
input = Seq(rowsDf.col("a").expr),
script = "some_non_existent_command",
output = Seq(AttributeReference("a", StringType)()),
child = rowsDf.queryExecution.sparkPlan,
ioschema = serdeIOSchema)
SparkPlanTest.executePlan(plan, hiveContext)
}
assert(e.getMessage.contains("Subprocess exited with status"))
assert(uncaughtExceptionHandler.exception.isEmpty)
}
test("SPARK-24339 verify the result after pruning the unused columns") {
val rowsDf = Seq(
("Bob", 16, 176),
("Alice", 32, 164),
("David", 60, 192),
("Amy", 24, 180)).toDF("name", "age", "height")
checkAnswer(
rowsDf,
(child: SparkPlan) => new ScriptTransformationExec(
input = Seq(rowsDf.col("name").expr),
script = "cat",
output = Seq(AttributeReference("name", StringType)()),
child = child,
ioschema = serdeIOSchema
),
rowsDf.select("name").collect())
assert(uncaughtExceptionHandler.exception.isEmpty)
}
test("SPARK-25990: TRANSFORM should handle different data types correctly") {
assume(TestUtils.testCommandAvailable("python"))
val scriptFilePath = getTestResourcePath("test_script.py")
withTempView("v") {
val df = Seq(
(1, "1", 1.0, BigDecimal(1.0), new Timestamp(1)),
(2, "2", 2.0, BigDecimal(2.0), new Timestamp(2)),
(3, "3", 3.0, BigDecimal(3.0), new Timestamp(3))
).toDF("a", "b", "c", "d", "e") // Note column d's data type is Decimal(38, 18)
df.createTempView("v")
val query = sql(
s"""
|SELECT
|TRANSFORM(a, b, c, d, e)
|USING 'python $scriptFilePath' AS (a, b, c, d, e)
|FROM v
""".stripMargin)
// In Hive 1.2, the string representation of a decimal omits trailing zeroes.
// But in Hive 2.3, it is always padded to 18 digits with trailing zeroes if necessary.
val decimalToString: Column => Column = if (HiveUtils.isHive23) {
c => c.cast("string")
} else {
c => c.cast("decimal(1, 0)").cast("string")
}
checkAnswer(query, identity, df.select(
'a.cast("string"),
'b.cast("string"),
'c.cast("string"),
decimalToString('d),
'e.cast("string")).collect())
}
}
}
private case class ExceptionInjectingOperator(child: SparkPlan) extends UnaryExecNode {
override protected def doExecute(): RDD[InternalRow] = {
child.execute().map { x =>
assert(TaskContext.get() != null) // Make sure that TaskContext is defined.
Thread.sleep(1000) // This sleep gives the external process time to start.
throw new IllegalArgumentException("intentional exception")
}
}
override def output: Seq[Attribute] = child.output
override def outputPartitioning: Partitioning = child.outputPartitioning
}
|
zuotingbing/spark
|
sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ScriptTransformationSuite.scala
|
Scala
|
apache-2.0
| 8,874 |
package com.eevolution.context.dictionary.infrastructure.service
import java.util.UUID
import akka.NotUsed
import com.eevolution.context.dictionary.domain._
import com.eevolution.context.dictionary.domain.model.MenuTrl
import com.eevolution.utils.PaginatedSequence
import com.lightbend.lagom.scaladsl.api.{Service, ServiceCall}
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/EmerisScala
* Created by [email protected] , www.e-evolution.com on 15/11/17.
*/
/**
* Menu Trl Service
*/
trait MenuTrlService extends Service with api.service.MenuTrlService {
override def getAll() : ServiceCall[NotUsed, List[MenuTrl]]
override def getById(id: Int): ServiceCall[NotUsed, MenuTrl]
override def getByUUID(uuid :UUID): ServiceCall[NotUsed, MenuTrl]
override def getAllByPage(pageNo: Option[Int], pageSize: Option[Int]): ServiceCall[NotUsed, PaginatedSequence[MenuTrl]]
def descriptor = {
import Service._
named("menuTrl").withCalls(
pathCall("/api/v1_0_0/menuTrl/all", getAll _) ,
pathCall("/api/v1_0_0/menuTrl/:id", getById _),
pathCall("/api/v1_0_0/menuTrl/:uuid", getByUUID _) ,
pathCall("/api/v1_0_0/menuTrl?pageNo&pageSize", getAllByPage _)
)
}
}
|
adempiere/ADReactiveSystem
|
dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/service/MenuTrlService.scala
|
Scala
|
gpl-3.0
| 2,009 |
/**
* Copyright 2016, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.workflowexecutor.pyspark
import scala.collection.JavaConverters._
import com.typesafe.config.ConfigFactory
class PythonPathGenerator(
private val pySparkPath: String
) {
private val config = ConfigFactory.load.getConfig("pyspark")
private val additionalPythonPath: Seq[String] = config.getStringList("python-path").asScala
private val additionalPaths = additionalPythonPath.map(p => s"$pySparkPath/$p")
private val pythonPathEnvKey = "PYTHONPATH"
private val envPythonPath = Option(System.getenv().get(pythonPathEnvKey))
val generatedPythonPath: Seq[String] = pySparkPath +: (additionalPaths ++ envPythonPath)
def pythonPath(additionalPaths: String*): String =
(generatedPythonPath ++ additionalPaths).mkString(":")
def env(additionalPaths: String*): (String, String) =
(pythonPathEnvKey, pythonPath(additionalPaths: _*))
}
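// Hedged usage sketch (not part of the original source): shows the environment entry the
// generator produces. The pyspark location and the extra path are placeholder values.
object PythonPathGeneratorUsageSketch {
  def example(): Unit = {
    val generator = new PythonPathGenerator("/opt/spark/python")
    val (key, value) = generator.env("/opt/jobs/libs")
    println(s"$key=$value") // e.g. PYTHONPATH=/opt/spark/python:...:/opt/jobs/libs
  }
}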
|
deepsense-io/seahorse-workflow-executor
|
workflowexecutor/src/main/scala/io/deepsense/workflowexecutor/pyspark/PythonPathGenerator.scala
|
Scala
|
apache-2.0
| 1,478 |
/*
* Copyright 2013 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.storehaus
import com.twitter.util.Future
/**
* Methods used in the various unpivot stores.
*
* @author Sam Ritchie
*/
object PivotOps {
/**
* Queries the underlying store with a multiGet and transforms the
   * underlying map by filtering out all (innerK -> v) pairs that
   * (a) have None as the value, or (b) in combination with the paired
   * outerK do not pass the input filter.
*/
def multiGetFiltered[OuterK, InnerK, V](store: ReadableStore[OuterK, Map[InnerK, V]], ks: Set[OuterK])
(pred: (OuterK, InnerK) => Boolean)
: Map[OuterK, Future[Option[List[(InnerK, V)]]]] =
store.multiGet(ks)
.map { case (outerK, futureOptV) =>
outerK -> futureOptV.map { optV =>
optV.map { _.filterKeys { pred(outerK, _) }.toList }
.filter { !_.isEmpty }
}
}
/**
* For each value, filters out InnerK entries with a value of None.
*/
def collectPivoted[K, OuterK, InnerK, V](pivoted: Map[OuterK, Map[InnerK, Option[V]]])
: Map[OuterK, Future[Option[List[(InnerK, V)]]]] =
pivoted.mapValues { m =>
Future.value {
Some(m.collect { case (innerK, Some(v)) => innerK -> v }.toList)
.filter { !_.isEmpty }
}
}
type InnerPair[OuterK, InnerK, V] = (OuterK, Option[Map[InnerK,V]])
/**
* Really belongs in Algebird, but recoding this explicitly keeps the dependency out.
*/
private def plusM[K, V](
l: Map[K, Future[Option[List[V]]]],
r: Map[K, Future[Option[List[V]]]]
): Map[K, Future[Option[List[V]]]] =
(r /: l) { case (m, (k, futureOptV)) =>
val newV = for {
leftOptV <- futureOptV
rightOptV <- m.getOrElse(k, Future.None)
} yield (leftOptV, rightOptV) match {
case (None, None) => None
case (None, Some(v)) => Some(v)
case (Some(v), None) => Some(v)
case (Some(l), Some(r)) => Some(l ++ r)
}
m + (k -> newV)
}
def multiPut[K, K1 <: K, OuterK, InnerK, V](store: Store[OuterK, Map[InnerK, V]], kvs: Map[K1, Option[V]])
(split: K => (OuterK, InnerK))
(implicit collect: FutureCollector[InnerPair[OuterK, InnerK, V]]): Map[K1, Future[Unit]] = {
val pivoted = CollectionOps.pivotMap[K1, OuterK, InnerK, Option[V]](kvs)(split)
// Input data merged with all relevant data from the underlying
// store.
val mergedResult: Map[OuterK, Future[Option[Map[InnerK, V]]]] =
plusM(
multiGetFiltered(store, pivoted.keySet) { case (outerK, innerK) =>
!pivoted(outerK).contains(innerK)
},
collectPivoted(pivoted)
).mapValues { _.map { _.map { _.toMap } } }
// Result of a multiPut of all affected pairs in the underlying
// store.
val submitted: Future[Map[OuterK, Future[Unit]]] =
FutureOps.mapCollect(mergedResult)(collect).map { store.multiPut(_) }
// The final flatMap returns a map of K to the future responsible
// for writing K's value into the underlying store. Due to
// packing, many Ks will reference the same Future[Unit].
kvs.flatMap {
case (k, _) =>
val (outerK, _) = split(k)
(1 to pivoted(outerK).size).map { _ =>
k -> submitted.flatMap { _.apply(outerK) }
}
}.toMap
}
}
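// Hedged usage sketch (not part of the original source): unpivots composite (user, day) keys
// into an outer store keyed by user with per-day inner maps. The backing store and the
// FutureCollector instance are assumed to be supplied by the caller.
object PivotOpsUsageSketch {
  def example(
    backing: Store[String, Map[Int, Long]],
    writes: Map[(String, Int), Option[Long]]
  )(implicit collect: FutureCollector[(String, Option[Map[Int, Long]])]): Map[(String, Int), Future[Unit]] =
    PivotOps.multiPut[(String, Int), (String, Int), String, Int, Long](backing, writes) {
      case (user, day) => (user, day)
    }
}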
|
joychugh/storehaus
|
storehaus-core/src/main/scala/com/twitter/storehaus/PivotOps.scala
|
Scala
|
apache-2.0
| 3,895 |
import akka.actor._
object TickTock {
object Tick
object Tock
val random = new scala.util.Random
class Counter extends Actor {
var counter = 0
def receive = {
case Tick =>
Thread.sleep(random.nextInt(500))
counter += 1
println(self.path.name + " after Tick: " + counter)
case Tock =>
Thread.sleep(random.nextInt(500))
counter -= 1
println(self.path.name + " after Tock: " + counter)
}
}
def main (args: Array[String]) {
val system = ActorSystem("CountingDemo")
println("Starting Counter 1.")
val counter1 = system.actorOf(Props[Counter], "Counter1")
counter1 ! Tick
(1 to 5).foreach(_ => counter1 ! Tick)
Thread.sleep(1000)
println("Starting Counter 2.")
val counter2 = system.actorOf(Props[Counter], "Counter2")
(1 to 5).foreach { _ =>
counter1 ! Tock
counter2 ! Tick
}
Thread.sleep(2000)
system.shutdown
}
}
|
jasonbaldridge/akka-tutorial
|
TickTock.scala
|
Scala
|
apache-2.0
| 950 |
/*
* Copyright 2015 org.NLP4L
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.nlp4l.ltr.support.procs
import com.typesafe.config.Config
import scala.collection.convert.WrapAsScala._
abstract class TrainerFactory(settings: Config) extends ConfiguredFactory(settings) {
def getInstance(): Trainer
}
trait Trainer {}
trait PointwiseTrainer extends Trainer {
def train(featureNames: Array[String],
features: Array[Vector[Float]],
labels: Array[Int],
maxLabel: Int,
progress: TrainingProgress) : String
}
trait PseudoPairwiseTrainer extends Trainer {
def train(featureNames: Array[String],
features: Vector[Vector[(Int, Vector[Float])]],
progress: TrainingProgress) : String
}
trait PairwiseTrainer extends Trainer {
// TBD
}
trait TrainingProgress {
def report(progress: Int)
}
abstract class ConfiguredFactory(val settings: Config){
def getStrParam(name: String, default: String): String = {
if(settings.hasPath(name)) settings.getString(name) else default
}
def getStrParamRequired(name: String): String = {
settings.getString(name)
}
def getIntParam(name: String, default: Int): Int = {
if(settings.hasPath(name)) settings.getInt(name) else default
}
def getIntParamRequired(name: String): Int = {
settings.getInt(name)
}
def getLongParam(name: String, default: Long): Long = {
if(settings.hasPath(name)) settings.getLong(name) else default
}
def getLongParamRequired(name: String): Long = {
settings.getLong(name)
}
def getDoubleParam(name: String, default: Double): Double = {
if(settings.hasPath(name)) settings.getDouble(name) else default
}
def getDoubleParamRequired(name: String): Double = {
settings.getDouble(name)
}
def getBoolParam(name: String, default: Boolean): Boolean = {
if(settings.hasPath(name)) settings.getBoolean(name) else default
}
def getBoolParamRequired(name: String): Boolean = {
settings.getBoolean(name)
}
def getStrListParam(name: String, default: Seq[String]): Seq[String] = {
if(settings.hasPath(name)) settings.getStringList(name) else default
}
def getStrListParamRequired(name: String): Seq[String] = {
settings.getStringList(name)
}
def getConfigParam(name: String, default: Config): Config = {
if(settings.hasPath(name)) settings.getConfig(name) else default
}
def getConfigParamRequired(name: String): Config = {
settings.getConfig(name)
}
def getConfigListParam(name: String, default: Seq[Config]): Seq[Config] = {
if(settings.hasPath(name)) settings.getConfigList(name) else default
}
def getConfigListParamRequired(name: String): Seq[Config] = {
settings.getConfigList(name)
}
}
abstract class DeployerFactory(settings: Config) extends ConfiguredFactory(settings){
def getInstance(): Deployer
}
trait Deployer {
def deploy (model_data: String): Unit
}
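// Hedged sketch (not part of the original source): a minimal concrete factory showing how the
// ConfiguredFactory parameter helpers above are expected to be used. The "numIterations"
// setting name and the returned model string are purely illustrative.
class NoOpTrainerFactory(settings: Config) extends TrainerFactory(settings) {
  override def getInstance(): Trainer = new PointwiseTrainer {
    private val iterations = getIntParam("numIterations", 100)
    override def train(featureNames: Array[String],
                       features: Array[Vector[Float]],
                       labels: Array[Int],
                       maxLabel: Int,
                       progress: TrainingProgress): String = {
      progress.report(100)
      s"""{"type":"noop","iterations":$iterations}"""
    }
  }
}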
|
fubuki/nlp4l
|
app/org/nlp4l/ltr/support/procs/Trainers.scala
|
Scala
|
apache-2.0
| 3,452 |
/*******************************************************************************
* Copyright (c) 2019. Carl Minden
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package com.anathema_roguelike
package entities.characters.player.classes
import com.anathema_roguelike.entities.characters.perks.PerkGroup
import com.anathema_roguelike.entities.characters.player.perks.abilities.shapeshifting.Shapeshift
import com.anathema_roguelike.entities.characters.player.perks.abilities.shapeshifting.ShapeshiftBat
import com.anathema_roguelike.entities.characters.player.perks.abilities.shapeshifting.ShapeshiftBear
import com.anathema_roguelike.entities.characters.player.perks.abilities.shapeshifting.ShapeshiftPanther
import com.anathema_roguelike.entities.characters.player.perks.abilities.spells.SpellOrSpecialization
import com.anathema_roguelike.entities.characters.player.perks.masteries.MasteryLevel
import com.anathema_roguelike.entities.characters.player.perks.skills.AnimalisticCasting
import com.anathema_roguelike.entities.characters.player.perks.skills.Attunement
import com.anathema_roguelike.entities.characters.player.perks.skills.Discipline
import com.anathema_roguelike.entities.characters.player.perks.skills.SureFooting
import com.anathema_roguelike.entities.characters.player.perks.specializations.AbilitySpecialization
import com.anathema_roguelike.stats.characterstats.masteries.BluntWeaponMastery
import com.anathema_roguelike.stats.characterstats.masteries.ShapeshiftingMastery
import com.anathema_roguelike.stats.characterstats.masteries.SpellMastery
class Druid() extends PlayerClass(
new PerkGroup /*1*/ (new SpellOrSpecialization[Druid](1), new ShapeshiftPanther),
new PerkGroup /*2*/ (new SpellOrSpecialization[Druid](1), new MasteryLevel[SpellMastery]),
new PerkGroup /*3*/ (new MasteryLevel[BluntWeaponMastery], new MasteryLevel[ShapeshiftingMastery]),
new PerkGroup /*4*/ (new SpellOrSpecialization[Druid](1), new MasteryLevel[SpellMastery]),
new PerkGroup /*5*/ (new SpellOrSpecialization[Druid](1), new MasteryLevel[BluntWeaponMastery], new MasteryLevel[ShapeshiftingMastery]),
new PerkGroup /*6*/ (new SpellOrSpecialization[Druid](1), new MasteryLevel[SpellMastery]),
new PerkGroup /*7*/ (new SpellOrSpecialization[Druid](2), new ShapeshiftBat),
new PerkGroup /*8*/ (new MasteryLevel[SpellMastery], new AbilitySpecialization[Shapeshift]),
new PerkGroup /*9*/ (new SpellOrSpecialization[Druid](2), new MasteryLevel[ShapeshiftingMastery]),
new PerkGroup /*10*/ (new SpellOrSpecialization[Druid](2), new MasteryLevel[SpellMastery], new Discipline),
new PerkGroup /*11*/ (new MasteryLevel[ShapeshiftingMastery], new AbilitySpecialization[Shapeshift]),
new PerkGroup /*12*/ (new SpellOrSpecialization[Druid](2), new ShapeshiftBear),
new PerkGroup /*13*/ (new MasteryLevel[ShapeshiftingMastery], new Attunement),
new PerkGroup /*14*/ (new SpellOrSpecialization[Druid](3), new MasteryLevel[SpellMastery]),
new PerkGroup /*15*/ (new MasteryLevel[BluntWeaponMastery], new AbilitySpecialization[Shapeshift], new SureFooting),
new PerkGroup /*16*/ (new SpellOrSpecialization[Druid](3), new MasteryLevel[ShapeshiftingMastery]),
new PerkGroup /*17*/ (new MasteryLevel[SpellMastery], new AbilitySpecialization[Shapeshift]),
new PerkGroup /*18*/ (new SpellOrSpecialization[Druid](3), new MasteryLevel[SpellMastery]),
new PerkGroup /*19*/ (new MasteryLevel[SpellMastery], new MasteryLevel[ShapeshiftingMastery]),
new PerkGroup /*20*/ (new SpellOrSpecialization[Druid](4), new MasteryLevel[BluntWeaponMastery], new AnimalisticCasting)) {
}
|
carlminden/anathema-roguelike
|
src/com/anathema_roguelike/entities/characters/player/classes/Druid.scala
|
Scala
|
gpl-3.0
| 4,274 |
package com.twitter.finagle.builder
import com.twitter.conversions.time._
import com.twitter.finagle._
import com.twitter.finagle.client.Transporter.Credentials
import com.twitter.finagle.client.{DefaultPool, StackClient, StdStackClient}
import com.twitter.finagle.client.{StackBasedClient, Transporter}
import com.twitter.finagle.factory.{BindingFactory, TimeoutFactory}
import com.twitter.finagle.filter.ExceptionSourceFilter
import com.twitter.finagle.loadbalancer.LoadBalancerFactory
import com.twitter.finagle.netty3.Netty3Transporter
import com.twitter.finagle.service.FailFastFactory.FailFast
import com.twitter.finagle.service._
import com.twitter.finagle.ssl.Ssl
import com.twitter.finagle.stats.{NullStatsReceiver, StatsReceiver}
import com.twitter.finagle.tracing.{NullTracer, TraceInitializerFilter}
import com.twitter.finagle.transport.Transport
import com.twitter.finagle.util._
import com.twitter.util
import com.twitter.util.{Duration, Future, NullMonitor, Time, Try}
import java.net.{InetSocketAddress, SocketAddress}
import java.util.concurrent.atomic.AtomicBoolean
import java.util.logging.Level
import javax.net.ssl.SSLContext
import org.jboss.netty.channel.{Channel, ChannelFactory}
import scala.annotation.implicitNotFound
/**
* Factory for [[com.twitter.finagle.builder.ClientBuilder]] instances
*/
object ClientBuilder {
type Complete[Req, Rep] =
ClientBuilder[Req, Rep, ClientConfig.Yes, ClientConfig.Yes, ClientConfig.Yes]
type NoCluster[Req, Rep] =
ClientBuilder[Req, Rep, Nothing, ClientConfig.Yes, ClientConfig.Yes]
type NoCodec =
ClientBuilder[_, _, ClientConfig.Yes, Nothing, ClientConfig.Yes]
def apply() = new ClientBuilder()
/**
* Used for Java access.
*/
def get() = apply()
/**
* Provides a typesafe `build` for Java.
*/
def safeBuild[Req, Rep](builder: Complete[Req, Rep]): Service[Req, Rep] =
builder.build()(ClientConfigEvidence.FullyConfigured)
/**
* Provides a typesafe `buildFactory` for Java.
*/
def safeBuildFactory[Req, Rep](builder: Complete[Req, Rep]): ServiceFactory[Req, Rep] =
builder.buildFactory()(ClientConfigEvidence.FullyConfigured)
/**
* Returns a [[com.twitter.finagle.client.StackClient]] which is equivalent to a
* `ClientBuilder` configured with the same codec; that is, given
* {{{
* val cb = ClientBuilder()
* .dest(dest)
* .name(name)
* .codec(codec)
*
* val sc = ClientBuilder.stackClientOfCodec(codec)
* }}}
* then the following are equivalent
* {{{
* cb.build()
* sc.newService(dest, name)
* }}}
* and the following are also equivalent
* {{{
* cb.buildFactory()
* sc.newClient(dest, name)
* }}}
*/
def stackClientOfCodec[Req, Rep](
codecFactory: CodecFactory[Req, Rep]#Client
): StackClient[Req, Rep] =
ClientBuilderClient(CodecClient[Req, Rep](codecFactory))
}
object ClientConfig {
sealed trait Yes
type FullySpecified[Req, Rep] = ClientConfig[Req, Rep, Yes, Yes, Yes]
val DefaultName = "client"
private case class NilClient[Req, Rep](
stack: Stack[ServiceFactory[Req, Rep]] = StackClient.newStack[Req, Rep],
params: Stack.Params = DefaultParams
) extends StackBasedClient[Req, Rep] {
def withParams(ps: Stack.Params) = copy(params = ps)
def transformed(t: Stack.Transformer) = copy(stack = t(stack))
def newService(dest: Name, label: String): Service[Req, Rep] =
newClient(dest, label).toService
def newClient(dest: Name, label: String): ServiceFactory[Req, Rep] =
ServiceFactory(() => Future.value(Service.mk[Req, Rep](_ => Future.exception(
new Exception("unimplemented")))))
}
def nilClient[Req, Rep]: StackBasedClient[Req, Rep] = NilClient[Req, Rep]()
// params specific to ClientBuilder
private[builder] case class DestName(name: Name) {
def mk(): (DestName, Stack.Param[DestName]) =
(this, DestName.param)
}
private[builder] object DestName {
implicit val param = Stack.Param(DestName(Name.empty))
}
private[builder] case class GlobalTimeout(timeout: Duration) {
def mk(): (GlobalTimeout, Stack.Param[GlobalTimeout]) =
(this, GlobalTimeout.param)
}
private[builder] object GlobalTimeout {
implicit val param = Stack.Param(GlobalTimeout(Duration.Top))
}
// private[twitter] for historical use, but should be private[builder]
// and may become so in the future.
private[twitter] case class Retries(policy: RetryPolicy[Try[Nothing]]) {
def mk(): (Retries, Stack.Param[Retries]) =
(this, Retries.param)
}
private[twitter] object Retries {
implicit val param = Stack.Param(Retries(RetryPolicy.Never))
}
private[builder] case class Daemonize(onOrOff: Boolean) {
def mk(): (Daemonize, Stack.Param[Daemonize]) =
(this, Daemonize.param)
}
private[builder] object Daemonize {
implicit val param = Stack.Param(Daemonize(true))
}
private[builder] case class MonitorFactory(mFactory: String => util.Monitor) {
def mk(): (MonitorFactory, Stack.Param[MonitorFactory]) =
(this, MonitorFactory.param)
}
private[builder] object MonitorFactory {
implicit val param = Stack.Param(MonitorFactory(_ => NullMonitor))
}
// historical defaults for ClientBuilder
private[builder] val DefaultParams = Stack.Params.empty +
param.Stats(NullStatsReceiver) +
param.Label(DefaultName) +
DefaultPool.Param(low = 1, high = Int.MaxValue,
bufferSize = 0, idleTime = 5.seconds, maxWaiters = Int.MaxValue) +
param.Tracer(NullTracer) +
param.Monitor(NullMonitor) +
param.Reporter(NullReporterFactory) +
Daemonize(false)
}
@implicitNotFound("Builder is not fully configured: Cluster: ${HasCluster}, Codec: ${HasCodec}, HostConnectionLimit: ${HasHostConnectionLimit}")
private[builder] trait ClientConfigEvidence[HasCluster, HasCodec, HasHostConnectionLimit]
private[builder] object ClientConfigEvidence {
implicit object FullyConfigured extends ClientConfigEvidence[ClientConfig.Yes, ClientConfig.Yes, ClientConfig.Yes]
}
/**
* TODO: do we really need to specify HasCodec? -- it's implied in a
* way by the proper Req, Rep.
*
* Note: these are documented in ClientBuilder, as that is where they
* are accessed by the end-user.
*/
private[builder] final class ClientConfig[Req, Rep, HasCluster, HasCodec, HasHostConnectionLimit]
/**
* A builder of Finagle [[com.twitter.finagle.Client Clients]].
*
* Please see the
* [[http://twitter.github.io/finagle/guide/FAQ.html#configuring-finagle6 Finagle user guide]]
* for information on a newer set of client-construction APIs introduced in Finagle v6.
*
* {{{
* val client = ClientBuilder()
* .codec(Http)
* .hosts("localhost:10000,localhost:10001,localhost:10003")
* .hostConnectionLimit(1)
* .tcpConnectTimeout(1.second) // max time to spend establishing a TCP connection.
* .retries(2) // (1) per-request retries
* .reportTo(new OstrichStatsReceiver) // export host-level load data to ostrich
* .logger(Logger.getLogger("http"))
* .build()
* }}}
*
* The `ClientBuilder` requires the definition of `cluster`, `codec`,
* and `hostConnectionLimit`. In Scala, these are statically type
* checked, and in Java the lack of any of the above causes a runtime
* error.
*
* The `build` method uses an implicit argument to statically
* typecheck the builder (to ensure completeness, see above). The Java
* compiler cannot provide such implicit, so we provide a separate
* function in Java to accomplish this. Thus, the Java code for the
* above is
*
* {{{
* Service<HttpRequest, HttpResponse> service =
* ClientBuilder.safeBuild(
* ClientBuilder.get()
* .codec(new Http())
* .hosts("localhost:10000,localhost:10001,localhost:10003")
* .hostConnectionLimit(1)
* .tcpConnectTimeout(1.second)
* .retries(2)
* .reportTo(new OstrichStatsReceiver())
* .logger(Logger.getLogger("http")))
* }}}
*
* Alternatively, using the `unsafeBuild` method on `ClientBuilder`
* verifies the builder dynamically, resulting in a runtime error
* instead of a compiler error.
*
* =Defaults=
*
* The following defaults are applied to clients constructed via ClientBuilder,
* unless overridden with the corresponding method. These defaults were chosen
* carefully so as to work well for most use cases.
*
* Commonly-configured options:
*
* - `connectTimeout`: [[com.twitter.util.Duration.Top Duration.Top]]
* - `tcpConnectTimeout`: 1 second
* - `requestTimeout`: [[com.twitter.util.Duration.Top Duration.Top]]
* - `timeout`: [[com.twitter.util.Duration.Top Duration.Top]]
* - `hostConnectionLimit`: `Int.MaxValue`
* - `hostConnectionCoresize`: 0
* - `hostConnectionIdleTime`: [[com.twitter.util.Duration.Top Duration.Top]]
* - `hostConnectionMaxWaiters`: `Int.MaxValue`
* - `failFast`: true
* - `failureAccrualParams`, `failureAccrualFactory`:
* `numFailures` = 5, `markDeadFor` = 5 seconds
*
* Advanced options:
*
* ''Before changing any of these, make sure that you know exactly how they will
* affect your application -- these options are typically only changed by expert
* users.''
*
* - `keepAlive`: Unspecified, in which case the
* [[http://docs.oracle.com/javase/7/docs/api/java/net/StandardSocketOptions.html?is-external=true#SO_KEEPALIVE Java default]]
* of `false` is used
* - `readerIdleTimeout`: [[com.twitter.util.Duration.Top Duration.Top]]
* - `writerIdleTimeout`: [[com.twitter.util.Duration.Top Duration.Top]]
* - `hostConnectionMaxIdleTime`: [[com.twitter.util.Duration.Top Duration.Top]]
* - `hostConnectionMaxLifeTime`: [[com.twitter.util.Duration.Top Duration.Top]]
* - `sendBufferSize`, `recvBufferSize`: OS-defined default value
*/
class ClientBuilder[Req, Rep, HasCluster, HasCodec, HasHostConnectionLimit] private[finagle](
client: StackBasedClient[Req, Rep]
) {
import ClientConfig._
import com.twitter.finagle.param._
// Convenient aliases.
type FullySpecifiedConfig = FullySpecified[Req, Rep]
type ThisConfig = ClientConfig[Req, Rep, HasCluster, HasCodec, HasHostConnectionLimit]
type This = ClientBuilder[Req, Rep, HasCluster, HasCodec, HasHostConnectionLimit]
private[builder] def this() = this(ClientConfig.nilClient)
override def toString() = "ClientBuilder(%s)".format(params)
private def copy[Req1, Rep1, HasCluster1, HasCodec1, HasHostConnectionLimit1](
client: StackBasedClient[Req1, Rep1]
): ClientBuilder[Req1, Rep1, HasCluster1, HasCodec1, HasHostConnectionLimit1] =
new ClientBuilder(client)
private def configured[P: Stack.Param, HasCluster1, HasCodec1, HasHostConnectionLimit1](
param: P
): ClientBuilder[Req, Rep, HasCluster1, HasCodec1, HasHostConnectionLimit1] =
copy(client.configured(param))
// Used in deprecated KetamaClientBuilder, remove when we drop it in
// favor of the finagle.Memcached protocol object.
private[finagle] def underlying: StackBasedClient[Req, Rep] = client
def params: Stack.Params = client.params
/**
* Specify the set of hosts to connect this client to. Requests
* will be load balanced across these. This is a shorthand form for
* specifying a cluster.
*
* One of the {{hosts}} variations or direct specification of the
* cluster (via {{cluster}}) is required.
*
* @param hostnamePortCombinations comma-separated "host:port"
* string.
*/
def hosts(
hostnamePortCombinations: String
): ClientBuilder[Req, Rep, Yes, HasCodec, HasHostConnectionLimit] = {
val addresses = InetSocketAddressUtil.parseHosts(hostnamePortCombinations)
hosts(addresses)
}
/**
* A variant of {{hosts}} that takes a sequence of
* [[java.net.SocketAddress]] instead.
*/
def hosts(
addrs: Seq[SocketAddress]
): ClientBuilder[Req, Rep, Yes, HasCodec, HasHostConnectionLimit] =
dest(Name.bound(addrs:_*))
/**
* A convenience method for specifying a one-host
* [[java.net.SocketAddress]] client.
*/
def hosts(
address: SocketAddress
): ClientBuilder[Req, Rep, Yes, HasCodec, HasHostConnectionLimit] =
hosts(Seq(address))
/**
* The logical destination of requests dispatched through this
* client, as evaluated by a resolver. If the name evaluates a
* label, this replaces the builder's current name.
*/
def dest(
addr: String
): ClientBuilder[Req, Rep, Yes, HasCodec, HasHostConnectionLimit] = {
Resolver.evalLabeled(addr) match {
case (n, "") => dest(n)
case (n, l) =>
val Label(label) = params[Label]
val cb =
if (label.isEmpty || l != addr)
this.name(l)
else
this
cb.dest(n)
}
}
/**
* The logical destination of requests dispatched through this
* client.
*/
def dest(
name: Name
): ClientBuilder[Req, Rep, Yes, HasCodec, HasHostConnectionLimit] =
configured(DestName(name))
/**
* The base [[com.twitter.finagle.Dtab]] used to interpret logical
* destinations for this client. (This is given as a function to
* permit late initialization of [[com.twitter.finagle.Dtab.base]].)
*/
def baseDtab(baseDtab: () => Dtab): This =
configured(BindingFactory.BaseDtab(baseDtab))
/**
* Specify a cluster directly. A
* [[com.twitter.finagle.builder.Cluster]] defines a dynamic
* mechanism for specifying a set of endpoints to which this client
* remains connected.
*/
def cluster(
cluster: Cluster[SocketAddress]
): ClientBuilder[Req, Rep, Yes, HasCodec, HasHostConnectionLimit] =
group(Group.fromCluster(cluster))
def group(
group: Group[SocketAddress]
): ClientBuilder[Req, Rep, Yes, HasCodec, HasHostConnectionLimit] =
dest(Name.fromGroup(group))
/**
* Specify a load balancer. The load balancer implements
* a strategy for choosing one from a set of hosts to service a request
*/
def loadBalancer(loadBalancer: LoadBalancerFactory): This =
configured(LoadBalancerFactory.Param(loadBalancer))
/**
* Specify the codec. The codec implements the network protocol
* used by the client, and consequently determines the `Req` and `Rep`
* type variables. One of the codec variations is required.
*/
def codec[Req1, Rep1](
codec: Codec[Req1, Rep1]
): ClientBuilder[Req1, Rep1, HasCluster, Yes, HasHostConnectionLimit] =
this.codec(Function.const(codec)(_))
.configured(ProtocolLibrary(codec.protocolLibraryName))
/**
* A variation of `codec` that supports codec factories. This is
* used by codecs that need dynamic construction, but should be
* transparent to the user.
*/
def codec[Req1, Rep1](
codecFactory: CodecFactory[Req1, Rep1]
): ClientBuilder[Req1, Rep1, HasCluster, Yes, HasHostConnectionLimit] =
this.codec(codecFactory.client)
.configured(ProtocolLibrary(codecFactory.protocolLibraryName))
/**
* A variation of codec for codecs that support only client-codecs.
*/
def codec[Req1, Rep1](
codecFactory: CodecFactory[Req1, Rep1]#Client
): ClientBuilder[Req1, Rep1, HasCluster, Yes, HasHostConnectionLimit] =
copy(CodecClient[Req1, Rep1](codecFactory).withParams(params))
/**
* Overrides the stack and [[com.twitter.finagle.Client]] that will be used
* by this builder.
*
* @param client A `StackBasedClient` representation of a
* [[com.twitter.finagle.Client]]. `client` is materialized with the state of
* configuration when `build` is called. There is no guarantee that all
* builder parameters will be used by the resultant `Client`; it is up to the
* discretion of `client` itself and the protocol implementation. For example,
* the Mux protocol has no use for most connection pool parameters (e.g.
* `hostConnectionLimit`). Thus when configuring
* [[com.twitter.finagle.ThriftMux]] clients (via [[stack(ThriftMux.client)]]),
* such connection pool parameters will not be applied.
*/
def stack[Req1, Rep1](
client: StackBasedClient[Req1, Rep1]
): ClientBuilder[Req1, Rep1, HasCluster, Yes, Yes] = {
copy(client.withParams(client.params ++ params))
}
@deprecated("Use tcpConnectTimeout instead", "5.0.1")
def connectionTimeout(duration: Duration): This = tcpConnectTimeout(duration)
/**
* Specify the TCP connection timeout.
*/
def tcpConnectTimeout(duration: Duration): This =
configured(Transporter.ConnectTimeout(duration))
/**
* The request timeout is the time given to a *single* request (if
* there are retries, they each get a fresh request timeout). The
* timeout is applied only after a connection has been acquired.
* That is: it is applied to the interval between the dispatch of
* the request and the receipt of the response.
*/
def requestTimeout(duration: Duration): This =
configured(TimeoutFilter.Param(duration))
/**
* The connect timeout is the timeout applied to the acquisition of
   * a Service. This includes both queueing time (e.g. because we
* cannot create more connections due to `hostConnectionLimit` and
* there are more than `hostConnectionLimit` requests outstanding)
* as well as physical connection time. Futures returned from
* `factory()` will always be satisfied within this timeout.
*
* This timeout is also used for name resolution, separately from
* queueing and physical connection time, so in the worst case the
* time to acquire a service may be double the given duration before
* timing out.
*/
def connectTimeout(duration: Duration): This =
configured(TimeoutFactory.Param(duration))
/**
* Total request timeout. This timeout is applied from the issuance
* of a request (through `service(request)`) until the
* satisfaction of that reply future. No request will take longer
* than this.
*
* Applicable only to service-builds (`build()`)
*/
def timeout(duration: Duration): This =
configured(GlobalTimeout(duration))
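  /*
   * Illustrative sketch (not from the original source) of how the
   * individual timeouts compose; the durations below are hypothetical:
   * {{{
   * import com.twitter.conversions.time._
   *
   * val timeouts = ClientBuilder()
   *   .tcpConnectTimeout(100.milliseconds) // raw TCP connect
   *   .connectTimeout(1.second)            // service acquisition, including queueing
   *   .requestTimeout(500.milliseconds)    // per request, per try
   *   .timeout(2.seconds)                  // total, across retries (build() only)
   * }}}
   */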
/**
* Apply TCP keepAlive (`SO_KEEPALIVE` socket option).
*/
def keepAlive(value: Boolean): This =
configured(params[Transport.Liveness].copy(keepAlive = Some(value)))
/**
   * The maximum time a connection may go without receiving data.
*/
def readerIdleTimeout(duration: Duration): This =
configured(params[Transport.Liveness].copy(readTimeout = duration))
/**
   * The maximum time a connection may go without sending data.
*/
def writerIdleTimeout(duration: Duration): This =
configured(params[Transport.Liveness].copy(writeTimeout = duration))
/**
* Report stats to the given `StatsReceiver`. This will report
* verbose global statistics and counters, that in turn may be
* exported to monitoring applications.
*
   * @note Per-host statistics will '''NOT''' be exported to this receiver
*
* @see [[ClientBuilder.reportHostStats]]
*/
def reportTo(receiver: StatsReceiver): This =
configured(Stats(receiver))
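  /*
   * Illustrative sketch (not from the original source), assuming the
   * default stats receiver from finagle-core:
   * {{{
   * import com.twitter.finagle.stats.DefaultStatsReceiver
   *
   * val reported = ClientBuilder()
   *   .reportTo(DefaultStatsReceiver) // global stats only; see reportHostStats for per-host stats
   * }}}
   */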
/**
* Report per host stats to the given `StatsReceiver`.
* The statsReceiver will be scoped per client, like this:
* client/connect_latency_ms_max/0.0.0.0:64754
*/
def reportHostStats(receiver: StatsReceiver): This =
configured(LoadBalancerFactory.HostStats(receiver))
/**
* Give a meaningful name to the client. Required.
*/
def name(value: String): This =
configured(Label(value))
/**
* The maximum number of connections that are allowed per host.
* Required. Finagle guarantees to never have more active
* connections than this limit.
*/
def hostConnectionLimit(value: Int): ClientBuilder[Req, Rep, HasCluster, HasCodec, Yes] =
configured(params[DefaultPool.Param].copy(high = value))
/**
   * The core size of the connection pool: the pool is not shrunk below this limit.
*/
def hostConnectionCoresize(value: Int): This =
configured(params[DefaultPool.Param].copy(low = value))
/**
* The amount of time a connection is allowed to linger (when it
* otherwise would have been closed by the pool) before being
* closed.
*/
def hostConnectionIdleTime(timeout: Duration): This =
configured(params[DefaultPool.Param].copy(idleTime = timeout))
/**
* The maximum queue size for the connection pool.
*/
def hostConnectionMaxWaiters(nWaiters: Int): This =
configured(params[DefaultPool.Param].copy(maxWaiters = nWaiters))
/**
* The maximum time a connection is allowed to linger unused.
*/
def hostConnectionMaxIdleTime(timeout: Duration): This =
configured(params[ExpiringService.Param].copy(idleTime = timeout))
/**
* The maximum time a connection is allowed to exist, regardless of occupancy.
*/
def hostConnectionMaxLifeTime(timeout: Duration): This =
configured(params[ExpiringService.Param].copy(lifeTime = timeout))
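  /*
   * Illustrative sketch (not from the original source) of the connection
   * pool knobs; the numbers are hypothetical:
   * {{{
   * import com.twitter.conversions.time._
   *
   * val pooled = ClientBuilder()
   *   .hostConnectionCoresize(5)          // keep at least 5 connections per host
   *   .hostConnectionLimit(20)            // never exceed 20 connections per host
   *   .hostConnectionIdleTime(30.seconds) // close pooled connections idle longer than this
   *   .hostConnectionMaxWaiters(100)      // bound the pool's wait queue
   * }}}
   */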
/**
* Experimental option to buffer `size` connections from the pool.
* The buffer is fast and lock-free, reducing contention for
   * services with very high request rates. The buffer should be sized
   * roughly to the expected concurrency. Buffer sizes that are powers of
   * two may be faster due to the use of modular arithmetic.
*
* @note This will be integrated into the mainline pool, at
* which time the experimental option will go away.
*/
def expHostConnectionBufferSize(size: Int): This =
configured(params[DefaultPool.Param].copy(bufferSize = size))
/**
* Retry (some) failed requests up to `value - 1` times.
*
* Retries are only done if the request failed with something
* known to be safe to retry. This includes [[WriteException WriteExceptions]]
* and [[Failure]]s that are marked [[Failure.Restartable restartable]].
*
* The configured policy has jittered backoffs between retries.
*
* @param value the maximum number of attempts (including retries) that
* can be made.
* - A value of `1` means one attempt and no retries
* on failure.
* - A value of `2` means one attempt and then a
* single retry if the failure is known to be safe to retry.
*
* @note The failures seen in the client will '''not include'''
* application level failures. This is particularly important for
* codecs that include exceptions, such as `Thrift`.
*
* This is only applicable to service-builds (`build()`).
*
* @see [[com.twitter.finagle.service.RetryPolicy.tries]]
*/
def retries(value: Int): This =
retryPolicy(RetryPolicy.tries(value))
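  /*
   * Illustrative sketch (not from the original source): `retries(3)`
   * permits one initial attempt plus up to two retries of failures that
   * are known to be safe to retry.
   * {{{
   * val withRetries = ClientBuilder()
   *   .retries(3) // equivalent to retryPolicy(RetryPolicy.tries(3))
   * }}}
   */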
/**
* Retry failed requests according to the given [[RetryPolicy]].
*
* @note The failures seen in the client will '''not include'''
* application level failures. This is particularly important for
* codecs that include exceptions, such as `Thrift`.
*
* This is only applicable to service-builds (`build()`).
*/
def retryPolicy(value: RetryPolicy[Try[Nothing]]): This =
configured(Retries(value))
/**
* Sets the TCP send buffer size.
*/
def sendBufferSize(value: Int): This =
configured(params[Transport.BufferSizes].copy(send = Some(value)))
/**
* Sets the TCP recv buffer size.
*/
def recvBufferSize(value: Int): This =
configured(params[Transport.BufferSizes].copy(recv = Some(value)))
/**
* Use the given channel factory instead of the default. Note that
* when using a non-default ChannelFactory, finagle can't
* meaningfully reference count factory usage, and so the caller is
* responsible for calling `releaseExternalResources()`.
*/
def channelFactory(cf: ChannelFactory): This =
configured(Netty3Transporter.ChannelFactory(cf))
/**
* Encrypt the connection with SSL. Hostname verification will be
* provided against the given hostname.
*/
def tls(hostname: String): This = {
configured((Transport.TLSClientEngine(Some({
case inet: InetSocketAddress => Ssl.client(hostname, inet.getPort)
case _ => Ssl.client()
}))))
.configured(Transporter.TLSHostname(Some(hostname)))
}
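  /*
   * Illustrative sketch (not from the original source); the hostname is
   * hypothetical:
   * {{{
   * val secured = ClientBuilder()
   *   .tls("api.example.com") // hostname verification is done against this name
   * }}}
   */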
/**
* Encrypt the connection with SSL. The Engine to use can be passed into the client.
   * This allows the user to use client certificates.
   * No SSL hostname validation is performed.
*/
def tls(sslContext: SSLContext): This =
configured((Transport.TLSClientEngine(Some({
case inet: InetSocketAddress => Ssl.client(sslContext, inet.getHostName, inet.getPort)
case _ => Ssl.client(sslContext)
}))))
/**
* Encrypt the connection with SSL. The Engine to use can be passed into the client.
   * This allows the user to use client certificates.
   * SSL hostname validation is performed against the passed-in hostname.
*/
def tls(sslContext: SSLContext, hostname: Option[String]): This =
configured((Transport.TLSClientEngine(Some({
case inet: InetSocketAddress => Ssl.client(sslContext, hostname.getOrElse(inet.getHostName), inet.getPort)
case _ => Ssl.client(sslContext)
}))))
.configured(Transporter.TLSHostname(hostname))
/**
* Do not perform TLS validation. Probably dangerous.
*/
def tlsWithoutValidation(): This =
configured(Transport.TLSClientEngine(Some({
case inet: InetSocketAddress => Ssl.clientWithoutCertificateValidation(inet.getHostName, inet.getPort)
case _ => Ssl.clientWithoutCertificateValidation()
})))
/**
* Make connections via the given HTTP proxy.
* If this is defined concurrently with socksProxy, the order in which they are applied is undefined.
*/
def httpProxy(httpProxy: SocketAddress): This =
configured(params[Transporter.HttpProxy].copy(sa = Some(httpProxy)))
/**
   * For the HTTP proxy, use these [[Credentials]] for authentication.
*/
def httpProxyUsernameAndPassword(credentials: Credentials): This =
configured(params[Transporter.HttpProxy].copy(credentials = Some(credentials)))
@deprecated("Use socksProxy(socksProxy: Option[SocketAddress])", "2014-12-02")
def socksProxy(socksProxy: SocketAddress): This =
configured(params[Transporter.SocksProxy].copy(sa = Some(socksProxy)))
/**
* Make connections via the given SOCKS proxy.
* If this is defined concurrently with httpProxy, the order in which they are applied is undefined.
*/
def socksProxy(socksProxy: Option[SocketAddress]): This =
configured(params[Transporter.SocksProxy].copy(sa = socksProxy))
/**
   * For the SOCKS proxy, use these credentials (username and password) for
   * authentication. socksProxy must be set as well.
*/
def socksUsernameAndPassword(credentials: (String,String)): This =
configured(params[Transporter.SocksProxy].copy(credentials = Some(credentials)))
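  /*
   * Illustrative sketch (not from the original source); the proxy address
   * and credentials are hypothetical:
   * {{{
   * import java.net.InetSocketAddress
   *
   * val proxied = ClientBuilder()
   *   .socksProxy(Some(new InetSocketAddress("localhost", 1080)))
   *   .socksUsernameAndPassword(("user", "secret"))
   * }}}
   */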
/**
* Specifies a tracer that receives trace events.
* See [[com.twitter.finagle.tracing]] for details.
*/
@deprecated("Use tracer() instead", "7.0.0")
def tracerFactory(factory: com.twitter.finagle.tracing.Tracer.Factory): This =
tracer(factory())
// API compatibility method
@deprecated("Use tracer() instead", "7.0.0")
def tracerFactory(t: com.twitter.finagle.tracing.Tracer): This =
tracer(t)
/**
* Specifies a tracer that receives trace events.
* See [[com.twitter.finagle.tracing]] for details.
*/
def tracer(t: com.twitter.finagle.tracing.Tracer): This =
configured(Tracer(t))
def monitor(mFactory: String => com.twitter.util.Monitor): This =
configured(MonitorFactory(mFactory))
/**
* Log very detailed debug information to the given logger.
*/
def logger(logger: java.util.logging.Logger): This =
configured(Logger(logger))
/**
* Use the given parameters for failure accrual. The first parameter
* is the number of *successive* failures that are required to mark
* a host failed. The second parameter specifies how long the host
* is dead for, once marked.
*
* To completely disable [[FailureAccrualFactory]] use `noFailureAccrual`.
*/
def failureAccrualParams(pair: (Int, Duration)): This = {
val (numFailures, markDeadFor) = pair
configured(FailureAccrualFactory.Param(numFailures, () => markDeadFor))
}
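  /*
   * Illustrative sketch (not from the original source): mark a host dead
   * after 5 consecutive failures and keep it dead for 30 seconds.
   * {{{
   * import com.twitter.conversions.time._
   *
   * val accrual = ClientBuilder()
   *   .failureAccrualParams((5, 30.seconds))
   * }}}
   */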
/**
* Disables [[FailureAccrualFactory]].
*
* To replace the [[FailureAccrualFactory]] use `failureAccrualFactory`.
*/
def noFailureAccrual: This =
configured(FailureAccrualFactory.Disabled)
/**
* Completely replaces the [[FailureAccrualFactory]] from the underlying stack
* with the [[ServiceFactoryWrapper]] returned from the given function `factory`.
*
* To completely disable [[FailureAccrualFactory]] use `noFailureAccrual`.
*/
def failureAccrualFactory(factory: util.Timer => ServiceFactoryWrapper): This =
configured(FailureAccrualFactory.Replaced(factory))
@deprecated(
"No longer experimental: Use failFast()." +
"The new default value is true, so replace .expFailFast(true) with nothing at all",
"5.3.10")
def expFailFast(enabled: Boolean): This =
failFast(enabled)
/**
* Marks a host dead on connection failure. The host remains dead
* until we successfully connect. Intermediate connection attempts
* *are* respected, but host availability is turned off during the
* reconnection period.
*/
def failFast(enabled: Boolean): This =
configured(FailFast(enabled))
/**
* When true, the client is daemonized. As with java threads, a
* process can exit only when all remaining clients are daemonized.
* False by default.
*/
def daemon(daemonize: Boolean): This =
configured(Daemonize(daemonize))
/**
* Provide an alternative to putting all request exceptions under
* a "failures" stat. Typical implementations may report any
   * cancellations or validation errors separately so that the success rate
   * considers only valid, non-cancelled requests.
*
* @param exceptionStatsHandler function to record failure details.
*/
def exceptionCategorizer(exceptionStatsHandler: stats.ExceptionStatsHandler): This =
configured(ExceptionStatsHandler(exceptionStatsHandler))
/**
* Configures the traffic class.
*
* @see [[Transporter.TrafficClass]]
*/
def trafficClass(value: Option[Int]): This =
configured(Transporter.TrafficClass(value))
/*** BUILD ***/
// This is only used for client alterations outside of the stack.
  // A more ideal usage would be to retrieve the stats param inside your specific module
  // instead of using this statsReceiver, as that keeps the params closer to where they're used.
private[finagle] lazy val statsReceiver = {
val Stats(sr) = params[Stats]
val Label(label) = params[Label]
sr.scope(label)
}
/**
* Construct a ServiceFactory. This is useful for stateful protocols
* (e.g., those that support transactions or authentication).
*/
def buildFactory()(
implicit THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ClientBuilder_DOCUMENTATION:
ClientConfigEvidence[HasCluster, HasCodec, HasHostConnectionLimit]
): ServiceFactory[Req, Rep] = {
val Label(label) = params[Label]
val DestName(dest) = params[DestName]
ClientBuilderClient.newClient(client, dest, label)
}
@deprecated("Used for ABI compat", "5.0.1")
def buildFactory(
THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ClientBuilder_DOCUMENTATION:
ThisConfig =:= FullySpecifiedConfig
): ServiceFactory[Req, Rep] = buildFactory()(
new ClientConfigEvidence[HasCluster, HasCodec, HasHostConnectionLimit]{})
/**
* Construct a Service.
*/
def build()(
implicit THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ClientBuilder_DOCUMENTATION:
ClientConfigEvidence[HasCluster, HasCodec, HasHostConnectionLimit]
): Service[Req, Rep] = {
val Label(label) = params[Label]
val DestName(dest) = params[DestName]
ClientBuilderClient.newService(client, dest, label)
}
@deprecated("Used for ABI compat", "5.0.1")
def build(
THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ClientBuilder_DOCUMENTATION:
ThisConfig =:= FullySpecifiedConfig
): Service[Req, Rep] = build()(
new ClientConfigEvidence[HasCluster, HasCodec, HasHostConnectionLimit]{})
private[this] def validated = {
if (!params.contains[DestName])
throw new IncompleteSpecification("No destination was specified")
this.asInstanceOf[ClientBuilder[Req, Rep, Yes, Yes, Yes]]
}
/**
* Construct a Service, with runtime checks for builder
* completeness.
*/
def unsafeBuild(): Service[Req, Rep] =
validated.build()
/**
* Construct a ServiceFactory, with runtime checks for builder
* completeness.
*/
def unsafeBuildFactory(): ServiceFactory[Req, Rep] =
validated.buildFactory()
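  /*
   * Illustrative end-to-end sketch (not from the original source); the
   * hosts are hypothetical, and the `Http` codec (from finagle-http) and
   * the `hosts` helper defined elsewhere in this builder are assumed to be
   * available. Destination, codec and host connection limit must all be
   * supplied before `build()` type-checks:
   * {{{
   * val service = ClientBuilder()
   *   .codec(Http())
   *   .hosts("host1.example.com:80,host2.example.com:80")
   *   .hostConnectionLimit(4)
   *   .name("example-client")
   *   .build()
   * }}}
   */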
}
/**
* A [[com.twitter.finagle.client.StackClient]] which adds the
* filters historically included in `ClientBuilder` clients.
*/
private case class ClientBuilderClient[Req, Rep](
client: StackClient[Req, Rep]
) extends StackClient[Req, Rep] {
def params = client.params
def withParams(ps: Stack.Params) = copy(client.withParams(ps))
def stack = client.stack
def withStack(stack: Stack[ServiceFactory[Req, Rep]]) = copy(client.withStack(stack))
def newClient(dest: Name, label: String) =
ClientBuilderClient.newClient(client, dest, label)
def newService(dest: Name, label: String) =
ClientBuilderClient.newService(client, dest, label)
}
private object ClientBuilderClient {
import ClientConfig._
import com.twitter.finagle.param._
private class RetryFilterModule[Req, Rep]
extends Stack.Module3[Stats, Retries, HighResTimer, ServiceFactory[Req, Rep]] {
override val role = new Stack.Role("ClientBuilder RetryFilter")
override val description = "Application-configured retries"
override def make(
statsP: Stats,
retriesP: Retries,
timerP: HighResTimer,
next: ServiceFactory[Req, Rep]
) = {
val Stats(statsReceiver) = statsP
val Retries(policy) = retriesP
val HighResTimer(timer) = timerP
if (policy eq RetryPolicy.Never) next
else {
val retries = new RetryExceptionsFilter[Req, Rep](policy, timer, statsReceiver)
retries andThen next
}
}
}
private class StatsFilterModule[Req, Rep]
extends Stack.Module2[Stats, ExceptionStatsHandler, ServiceFactory[Req, Rep]] {
override val role = new Stack.Role("ClientBuilder StatsFilter")
override val description = "Record request stats scoped to 'tries'"
override def make(
statsP: Stats,
exceptionStatsHandlerP: ExceptionStatsHandler,
next: ServiceFactory[Req, Rep]
) = {
val Stats(statsReceiver) = statsP
val ExceptionStatsHandler(categorizer) = exceptionStatsHandlerP
val stats = new StatsFilter[Req, Rep](statsReceiver.scope("tries"), categorizer)
stats andThen next
}
}
private class GlobalTimeoutModule[Req, Rep]
extends Stack.Module2[GlobalTimeout, Timer, ServiceFactory[Req, Rep]] {
override val role = new Stack.Role("ClientBuilder GlobalTimeoutFilter")
override val description = "Application-configured global timeout"
override def make(
globalTimeoutP: GlobalTimeout,
timerP: Timer,
next: ServiceFactory[Req, Rep]
) = {
val GlobalTimeout(timeout) = globalTimeoutP
val Timer(timer) = timerP
if (timeout == Duration.Top) next
else {
val exception = new GlobalRequestTimeoutException(timeout)
val globalTimeout = new TimeoutFilter[Req, Rep](timeout, exception, timer)
globalTimeout andThen next
}
}
}
private class ExceptionSourceFilterModule[Req, Rep]
extends Stack.Module1[Label, ServiceFactory[Req, Rep]] {
override val role = new Stack.Role("ClientBuilder ExceptionSourceFilter")
override val description = "Exception source filter"
override def make(
labelP: Label,
next: ServiceFactory[Req, Rep]
) = {
val Label(label) = labelP
val exceptionSource = new ExceptionSourceFilter[Req, Rep](label)
exceptionSource andThen next
}
}
def newClient[Req, Rep](
client: StackBasedClient[Req, Rep],
dest: Name,
label: String
): ServiceFactory[Req, Rep] = {
val params = client.params
val Daemonize(daemon) = params[Daemonize]
val Logger(logger) = params[Logger]
val MonitorFactory(mFactory) = params[MonitorFactory]
val clientParams = params + Monitor(mFactory(label))
val factory = client.withParams(clientParams).newClient(dest, label)
val exitGuard = if (!daemon) Some(ExitGuard.guard(s"client for '$label'")) else None
new ServiceFactoryProxy[Req, Rep](factory) {
private[this] val closed = new AtomicBoolean(false)
override def close(deadline: Time): Future[Unit] = {
if (!closed.compareAndSet(false, true)) {
logger.log(Level.WARNING, "Close on ServiceFactory called multiple times!",
new Exception/*stack trace please*/)
return Future.exception(new IllegalStateException)
}
super.close(deadline) ensure {
exitGuard.foreach(_.unguard())
}
}
}
}
def newService[Req, Rep](
client0: StackBasedClient[Req, Rep],
dest: Name,
label: String
): Service[Req, Rep] = {
val client =
client0
.transformed(new Stack.Transformer {
def apply[Req, Rep](stack: Stack[ServiceFactory[Req, Rep]]) =
stack
.insertBefore(Requeues.role, new StatsFilterModule[Req, Rep])
.insertBefore(Requeues.role, new RetryFilterModule[Req, Rep])
.prepend(new GlobalTimeoutModule[Req, Rep])
.prepend(new ExceptionSourceFilterModule[Req, Rep])
})
.configured(FactoryToService.Enabled(true))
val factory = newClient(client, dest, label)
val service: Service[Req, Rep] = new FactoryToService[Req, Rep](factory)
new ServiceProxy[Req, Rep](service) {
private[this] val released = new AtomicBoolean(false)
override def close(deadline: Time): Future[Unit] = {
if (!released.compareAndSet(false, true)) {
val Logger(logger) = client.params[Logger]
logger.log(java.util.logging.Level.WARNING, "Release on Service called multiple times!",
new Exception/*stack trace please*/)
return Future.exception(new IllegalStateException)
}
super.close(deadline)
}
}
}
}
/**
* A [[com.twitter.finagle.client.StackClient]] based on a
* [[com.twitter.finagle.Codec]].
*/
private case class CodecClient[Req, Rep](
codecFactory: CodecFactory[Req, Rep]#Client,
stack: Stack[ServiceFactory[Req, Rep]] = StackClient.newStack[Req, Rep],
params: Stack.Params = ClientConfig.DefaultParams
) extends StackClient[Req, Rep] {
import com.twitter.finagle.param._
def withParams(ps: Stack.Params) = copy(params = ps)
def withStack(stack: Stack[ServiceFactory[Req, Rep]]) = copy(stack = stack)
def newClient(dest: Name, label: String): ServiceFactory[Req, Rep] = {
val codec = codecFactory(ClientCodecConfig(label))
val prepConn = new Stack.Module1[Stats, ServiceFactory[Req, Rep]] {
val role = StackClient.Role.prepConn
val description = "Connection preparation phase as defined by a Codec"
def make(_stats: Stats, next: ServiceFactory[Req, Rep]) = {
val Stats(stats) = _stats
val underlying = codec.prepareConnFactory(next)
new ServiceFactoryProxy(underlying) {
val stat = stats.stat("codec_connection_preparation_latency_ms")
override def apply(conn: ClientConnection) = {
val begin = Time.now
super.apply(conn) ensure {
stat.add((Time.now - begin).inMilliseconds)
}
}
}
}
}
val clientStack = {
val stack0 = stack
.replace(StackClient.Role.prepConn, prepConn)
.replace(StackClient.Role.prepFactory, (next: ServiceFactory[Req, Rep]) =>
codec.prepareServiceFactory(next))
.replace(TraceInitializerFilter.role, codec.newTraceInitializer)
// disable failFast if the codec requests it or it is
// disabled via the ClientBuilder parameter.
val FailFast(failFast) = params[FailFast]
if (!codec.failFastOk || !failFast) stack0.remove(FailFastFactory.role) else stack0
}
case class Client(
stack: Stack[ServiceFactory[Req, Rep]] = clientStack,
params: Stack.Params = params
) extends StdStackClient[Req, Rep, Client] {
protected def copy1(
stack: Stack[ServiceFactory[Req, Rep]] = this.stack,
params: Stack.Params = this.params): Client = copy(stack, params)
protected type In = Any
protected type Out = Any
protected def newTransporter(): Transporter[Any, Any] = {
val Stats(stats) = params[Stats]
val newTransport = (ch: Channel) => codec.newClientTransport(ch, stats)
Netty3Transporter[Any, Any](codec.pipelineFactory,
params + Netty3Transporter.TransportFactory(newTransport))
}
protected def newDispatcher(transport: Transport[In, Out]) =
codec.newClientDispatcher(transport, params)
}
Client().newClient(dest, label)
}
// not called
def newService(dest: Name, label: String): Service[Req, Rep] = ???
}
|
rojanu/finagle
|
finagle-core/src/main/scala/com/twitter/finagle/builder/ClientBuilder.scala
|
Scala
|
apache-2.0
| 41,279 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.File
import scala.util.{Random, Try}
import org.apache.spark.SparkConf
import org.apache.spark.sql.functions.monotonically_increasing_id
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.util.{Benchmark, Utils}
/**
* Benchmark to measure read performance with Filter pushdown.
*/
object FilterPushdownBenchmark {
val conf = new SparkConf()
conf.set("orc.compression", "snappy")
conf.set("spark.sql.parquet.compression.codec", "snappy")
private val spark = SparkSession.builder()
.master("local[1]")
.appName("FilterPushdownBenchmark")
.config(conf)
.getOrCreate()
def withTempPath(f: File => Unit): Unit = {
val path = Utils.createTempDir()
path.delete()
try f(path) finally Utils.deleteRecursively(path)
}
def withTempTable(tableNames: String*)(f: => Unit): Unit = {
try f finally tableNames.foreach(spark.catalog.dropTempView)
}
def withSQLConf(pairs: (String, String)*)(f: => Unit): Unit = {
val (keys, values) = pairs.unzip
val currentValues = keys.map(key => Try(spark.conf.get(key)).toOption)
(keys, values).zipped.foreach(spark.conf.set)
try f finally {
keys.zip(currentValues).foreach {
case (key, Some(value)) => spark.conf.set(key, value)
case (key, None) => spark.conf.unset(key)
}
}
}
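  // Illustrative usage sketch (not from the original file): withSQLConf
  // temporarily overrides SQL confs and restores (or unsets) the previous
  // values afterwards, e.g.
  //   withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
  //     spark.sql("SELECT * FROM parquetTable WHERE id = 0").collect()
  //   }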
private def prepareTable(dir: File, numRows: Int, width: Int): Unit = {
import spark.implicits._
val selectExpr = (1 to width).map(i => s"CAST(value AS STRING) c$i")
val df = spark.range(numRows).map(_ => Random.nextLong).selectExpr(selectExpr: _*)
.withColumn("id", monotonically_increasing_id())
val dirORC = dir.getCanonicalPath + "/orc"
val dirParquet = dir.getCanonicalPath + "/parquet"
df.write.mode("overwrite").orc(dirORC)
df.write.mode("overwrite").parquet(dirParquet)
spark.read.orc(dirORC).createOrReplaceTempView("orcTable")
spark.read.parquet(dirParquet).createOrReplaceTempView("parquetTable")
}
def filterPushDownBenchmark(
values: Int,
title: String,
whereExpr: String,
selectExpr: String = "*"): Unit = {
val benchmark = new Benchmark(title, values, minNumIters = 5)
Seq(false, true).foreach { pushDownEnabled =>
val name = s"Parquet Vectorized ${if (pushDownEnabled) s"(Pushdown)" else ""}"
benchmark.addCase(name) { _ =>
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> s"$pushDownEnabled") {
spark.sql(s"SELECT $selectExpr FROM parquetTable WHERE $whereExpr").collect()
}
}
}
Seq(false, true).foreach { pushDownEnabled =>
val name = s"Native ORC Vectorized ${if (pushDownEnabled) s"(Pushdown)" else ""}"
benchmark.addCase(name) { _ =>
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> s"$pushDownEnabled") {
spark.sql(s"SELECT $selectExpr FROM orcTable WHERE $whereExpr").collect()
}
}
}
/*
Java HotSpot(TM) 64-Bit Server VM 1.8.0_152-b16 on Mac OS X 10.13.2
Intel(R) Core(TM) i7-4770HQ CPU @ 2.20GHz
Select 0 row (id IS NULL): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-----------------------------------------------------------------------------------------------
Parquet Vectorized 7882 / 7957 2.0 501.1 1.0X
Parquet Vectorized (Pushdown) 55 / 60 285.2 3.5 142.9X
Native ORC Vectorized 5592 / 5627 2.8 355.5 1.4X
Native ORC Vectorized (Pushdown) 66 / 70 237.2 4.2 118.9X
Select 0 row (7864320 < id < 7864320): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-----------------------------------------------------------------------------------------------
Parquet Vectorized 7884 / 7909 2.0 501.2 1.0X
Parquet Vectorized (Pushdown) 739 / 752 21.3 47.0 10.7X
Native ORC Vectorized 5614 / 5646 2.8 356.9 1.4X
Native ORC Vectorized (Pushdown) 81 / 83 195.2 5.1 97.8X
Select 1 row (id = 7864320): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-----------------------------------------------------------------------------------------------
Parquet Vectorized 7905 / 8027 2.0 502.6 1.0X
Parquet Vectorized (Pushdown) 740 / 766 21.2 47.1 10.7X
Native ORC Vectorized 5684 / 5738 2.8 361.4 1.4X
Native ORC Vectorized (Pushdown) 78 / 81 202.4 4.9 101.7X
Select 1 row (id <=> 7864320): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-----------------------------------------------------------------------------------------------
Parquet Vectorized 7928 / 7993 2.0 504.1 1.0X
Parquet Vectorized (Pushdown) 747 / 772 21.0 47.5 10.6X
Native ORC Vectorized 5728 / 5753 2.7 364.2 1.4X
Native ORC Vectorized (Pushdown) 76 / 78 207.9 4.8 104.8X
Select 1 row (7864320 <= id <= 7864320):Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-----------------------------------------------------------------------------------------------
Parquet Vectorized 7939 / 8021 2.0 504.8 1.0X
Parquet Vectorized (Pushdown) 746 / 770 21.1 47.4 10.6X
Native ORC Vectorized 5690 / 5734 2.8 361.7 1.4X
Native ORC Vectorized (Pushdown) 76 / 79 206.7 4.8 104.3X
Select 1 row (7864319 < id < 7864321): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-----------------------------------------------------------------------------------------------
Parquet Vectorized 7972 / 8019 2.0 506.9 1.0X
Parquet Vectorized (Pushdown) 742 / 764 21.2 47.2 10.7X
Native ORC Vectorized 5704 / 5743 2.8 362.6 1.4X
Native ORC Vectorized (Pushdown) 76 / 78 207.9 4.8 105.4X
Select 10% rows (id < 1572864): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-----------------------------------------------------------------------------------------------
Parquet Vectorized 8733 / 8808 1.8 555.2 1.0X
Parquet Vectorized (Pushdown) 2213 / 2267 7.1 140.7 3.9X
Native ORC Vectorized 6420 / 6463 2.4 408.2 1.4X
Native ORC Vectorized (Pushdown) 1313 / 1331 12.0 83.5 6.7X
Select 50% rows (id < 7864320): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-----------------------------------------------------------------------------------------------
Parquet Vectorized 11518 / 11591 1.4 732.3 1.0X
Parquet Vectorized (Pushdown) 7962 / 7991 2.0 506.2 1.4X
Native ORC Vectorized 8927 / 8985 1.8 567.6 1.3X
Native ORC Vectorized (Pushdown) 6102 / 6160 2.6 387.9 1.9X
Select 90% rows (id < 14155776): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-----------------------------------------------------------------------------------------------
Parquet Vectorized 14255 / 14389 1.1 906.3 1.0X
Parquet Vectorized (Pushdown) 13564 / 13594 1.2 862.4 1.1X
Native ORC Vectorized 11442 / 11608 1.4 727.5 1.2X
Native ORC Vectorized (Pushdown) 10991 / 11029 1.4 698.8 1.3X
Select all rows (id IS NOT NULL): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-----------------------------------------------------------------------------------------------
Parquet Vectorized 14917 / 14938 1.1 948.4 1.0X
Parquet Vectorized (Pushdown) 14910 / 14964 1.1 948.0 1.0X
Native ORC Vectorized 11986 / 12069 1.3 762.0 1.2X
Native ORC Vectorized (Pushdown) 12037 / 12123 1.3 765.3 1.2X
Select all rows (id > -1): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-----------------------------------------------------------------------------------------------
Parquet Vectorized 14951 / 14976 1.1 950.6 1.0X
Parquet Vectorized (Pushdown) 14934 / 15016 1.1 949.5 1.0X
Native ORC Vectorized 12000 / 12156 1.3 763.0 1.2X
Native ORC Vectorized (Pushdown) 12079 / 12113 1.3 767.9 1.2X
Select all rows (id != -1): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-----------------------------------------------------------------------------------------------
Parquet Vectorized 14930 / 14972 1.1 949.3 1.0X
Parquet Vectorized (Pushdown) 15015 / 15047 1.0 954.6 1.0X
Native ORC Vectorized 12090 / 12259 1.3 768.7 1.2X
Native ORC Vectorized (Pushdown) 12021 / 12096 1.3 764.2 1.2X
*/
benchmark.run()
}
def main(args: Array[String]): Unit = {
val numRows = 1024 * 1024 * 15
val width = 5
val mid = numRows / 2
withTempPath { dir =>
withTempTable("orcTable", "patquetTable") {
prepareTable(dir, numRows, width)
Seq("id IS NULL", s"$mid < id AND id < $mid").foreach { whereExpr =>
val title = s"Select 0 row ($whereExpr)".replace("id AND id", "id")
filterPushDownBenchmark(numRows, title, whereExpr)
}
Seq(
s"id = $mid",
s"id <=> $mid",
s"$mid <= id AND id <= $mid",
s"${mid - 1} < id AND id < ${mid + 1}"
).foreach { whereExpr =>
val title = s"Select 1 row ($whereExpr)".replace("id AND id", "id")
filterPushDownBenchmark(numRows, title, whereExpr)
}
val selectExpr = (1 to width).map(i => s"MAX(c$i)").mkString("", ",", ", MAX(id)")
Seq(10, 50, 90).foreach { percent =>
filterPushDownBenchmark(
numRows,
s"Select $percent% rows (id < ${numRows * percent / 100})",
s"id < ${numRows * percent / 100}",
selectExpr
)
}
Seq("id IS NOT NULL", "id > -1", "id != -1").foreach { whereExpr =>
filterPushDownBenchmark(
numRows,
s"Select all rows ($whereExpr)",
whereExpr,
selectExpr)
}
}
}
}
}
|
brad-kaiser/spark
|
sql/core/src/test/scala/org/apache/spark/sql/FilterPushdownBenchmark.scala
|
Scala
|
apache-2.0
| 12,569 |
package autolift.scalaz
import autolift.{LiftTraverse, LiftTraverseSyntax}
import scalaz.{Functor, Applicative, Traverse}
trait ScalazLiftTraverse[Obj, Fn] extends LiftTraverse[Obj, Fn]
object ScalazLiftTraverse extends LowPriorityScalazLiftTraverse{
def apply[Obj, Fn](implicit lift: ScalazLiftTraverse[Obj, Fn]): Aux[Obj, Fn, lift.Out] = lift
implicit def base[M[_], A, B, C >: A, F[_]](implicit ap: Applicative[M], traverse: Traverse[F]): Aux[F[A], C => M[B], M[F[B]]] =
new ScalazLiftTraverse[F[A], C => M[B]]{
type Out = M[F[B]]
def apply(fa: F[A], f: C => M[B]) = traverse.traverse(fa)(f)
}
}
trait LowPriorityScalazLiftTraverse{
type Aux[Obj, Fn, Out0] = ScalazLiftTraverse[Obj, Fn]{ type Out = Out0 }
implicit def recur[F[_], G, Fn](implicit functor: Functor[F], lift: LiftTraverse[G, Fn]): Aux[F[G], Fn, F[lift.Out]] =
new ScalazLiftTraverse[F[G], Fn]{
type Out = F[lift.Out]
def apply(fg: F[G], fn: Fn) = functor.map(fg){ g: G => lift(g, fn) }
}
}
final class LiftedTraverse[M[_], A, B](protected val f: A => M[B])(implicit ap: Applicative[M]){
def map[C](g: B => C) = new LiftedTraverse({ x: A => ap.map(f(x))(g) })
def apply[That](that: That)(implicit lift: LiftTraverse[That, A => M[B]]): lift.Out = lift(that, f)
}
trait LiftedTraverseImplicits{
implicit def functor[M[_], A] = new Functor[LiftedTraverse[M, A, ?]]{
def map[B, C](lt: LiftedTraverse[M, A, B])(f: B => C) = lt map f
}
}
trait LiftTraverseContext{
def liftTraverse[M[_], A, B](f: A => M[B])(implicit ap: Applicative[M]) = new LiftedTraverse(f)
}
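// Illustrative usage sketch (hypothetical values, not from the original file):
// given a function into an Applicative, `liftTraverse` builds a LiftedTraverse
// that traverses at the matching layer of a nested structure, e.g.
//   import scalaz.std.list._, scalaz.std.option._
//   val lifted = liftTraverse { i: Int => Option(i.toString) }
//   val out: Option[List[String]] = lifted(List(1, 2, 3)) // Some(List("1", "2", "3"))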
trait LiftTraverseExport{
implicit def mkTv[Obj, Fn](implicit lift: ScalazLiftTraverse[Obj, Fn]): ScalazLiftTraverse.Aux[Obj, Fn, lift.Out] = lift
}
trait LiftTraversePackage extends LiftTraverseExport
with LiftTraverseContext
with LiftTraverseSyntax
|
wheaties/AutoLifts
|
autolift-scalaz/src/main/scala/autolift/scalaz/LiftTraverse.scala
|
Scala
|
apache-2.0
| 1,856 |
package com.twitter.finagle.socks
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.junit.runner.RunWith
import org.scalatest.mock.MockitoSugar
import org.mockito.ArgumentCaptor
import org.mockito.Matchers._
import org.mockito.Mockito.{times, verify, when, atLeastOnce}
import org.jboss.netty.buffer.{ChannelBuffers, ChannelBuffer}
import java.util.Arrays
import org.jboss.netty.channel._
import java.net.{SocketAddress, InetAddress, InetSocketAddress}
import com.twitter.finagle.ConnectionFailedException
@RunWith(classOf[JUnitRunner])
class SocksConnectHandlerTest extends FunSuite with MockitoSugar {
class SocksConnectHandlerHelper {
val ctx = mock[ChannelHandlerContext]
val channel = mock[Channel]
when(ctx.getChannel) thenReturn channel
val pipeline = mock[ChannelPipeline]
when(ctx.getPipeline) thenReturn pipeline
when(channel.getPipeline) thenReturn pipeline
val closeFuture = Channels.future(channel)
when(channel.getCloseFuture) thenReturn closeFuture
val port = 80 // never bound
val portByte1 = (port >> 8).toByte
val portByte2 = (port & 0xFF).toByte
val remoteAddress = new InetSocketAddress(InetAddress.getByAddress(null, Array[Byte](0x7F, 0x0, 0x0, 0x1)), port)
when(channel.getRemoteAddress) thenReturn remoteAddress
val proxyAddress = mock[SocketAddress]
val connectFuture = Channels.future(channel, true)
val connectRequested = new DownstreamChannelStateEvent(
channel, connectFuture, ChannelState.CONNECTED, remoteAddress)
def sendBytesToServer(x: Byte, xs: Byte*) {
val ec = ArgumentCaptor.forClass(classOf[DownstreamMessageEvent])
verify(ctx, atLeastOnce()).sendDownstream(ec.capture)
val e = ec.getValue
assert(e.getMessage match {
case buf: ChannelBuffer =>
val a = Array(x, xs: _*)
val bufBytes = Array.ofDim[Byte](buf.readableBytes())
buf.getBytes(0, bufBytes)
Arrays.equals(bufBytes, a)
})
}
def receiveBytesFromServer(ch: SocksConnectHandler, bytes: Array[Byte]) {
ch.handleUpstream(ctx, new UpstreamMessageEvent(
channel, ChannelBuffers.wrappedBuffer(bytes), null))
}
def connectAndRemoveHandler(ch: SocksConnectHandler) {
assert(connectFuture.isDone)
verify(pipeline).remove(ch)
// we propagated the connect
val ec = ArgumentCaptor.forClass(classOf[UpstreamChannelStateEvent])
verify(ctx).sendUpstream(ec.capture)
val e = ec.getValue
assert(e.getChannel === channel)
assert(e.getState === ChannelState.CONNECTED)
assert(e.getValue === remoteAddress)
}
def checkDidClose() {
val ec = ArgumentCaptor.forClass(classOf[DownstreamChannelStateEvent])
verify(pipeline).sendDownstream(ec.capture)
val e = ec.getValue
assert(e.getChannel === channel)
assert(e.getFuture === closeFuture)
assert(e.getState === ChannelState.OPEN)
assert(e.getValue === java.lang.Boolean.FALSE)
}
}
test("SocksConnectHandler should with no authentication upon connect wrap the downstream connect request") {
val h = new SocksConnectHandlerHelper
import h._
val ch = new SocksConnectHandler(proxyAddress, remoteAddress)
ch.handleDownstream(ctx, connectRequested)
val ec = ArgumentCaptor.forClass(classOf[DownstreamChannelStateEvent])
verify(ctx).sendDownstream(ec.capture)
val e = ec.getValue
assert(e.getChannel === channel)
assert(e.getFuture != connectFuture) // this is proxied
assert(e.getState === ChannelState.CONNECTED)
assert(e.getValue === proxyAddress)
}
test("SocksConnectHandler should with no authentication upon connect propagate cancellation") {
val h = new SocksConnectHandlerHelper
import h._
val ch = new SocksConnectHandler(proxyAddress, remoteAddress)
ch.handleDownstream(ctx, connectRequested)
val ec = ArgumentCaptor.forClass(classOf[DownstreamChannelStateEvent])
verify(ctx).sendDownstream(ec.capture)
val e = ec.getValue
assert(!e.getFuture.isCancelled)
connectFuture.cancel()
assert(e.getFuture.isCancelled)
}
test("SocksConnectHandler should with no authentication when connect is successful not propagate success") {
val h = new SocksConnectHandlerHelper
import h._
val ch = new SocksConnectHandler(proxyAddress, remoteAddress)
ch.handleDownstream(ctx, connectRequested)
ch.handleUpstream(ctx, new UpstreamChannelStateEvent(
channel, ChannelState.CONNECTED, remoteAddress))
assert(!connectFuture.isDone)
verify(ctx, times(0)).sendUpstream(any[ChannelEvent])
}
test("SocksConnectHandler should with no authentication when connect is successful propagate connection cancellation") {
val h = new SocksConnectHandlerHelper
import h._
val ch = new SocksConnectHandler(proxyAddress, remoteAddress)
ch.handleDownstream(ctx, connectRequested)
ch.handleUpstream(ctx, new UpstreamChannelStateEvent(
channel, ChannelState.CONNECTED, remoteAddress))
assert(!connectFuture.isDone)
verify(ctx, times(0)).sendUpstream(any[ChannelEvent])
connectFuture.cancel()
checkDidClose()
}
test("SocksConnectHandler should with no authentication when connect is successful do SOCKS negotiation") {
val h = new SocksConnectHandlerHelper
import h._
val ch = new SocksConnectHandler(proxyAddress, remoteAddress)
ch.handleDownstream(ctx, connectRequested)
ch.handleUpstream(ctx, new UpstreamChannelStateEvent(
channel, ChannelState.CONNECTED, remoteAddress))
assert(!connectFuture.isDone)
verify(ctx, times(0)).sendUpstream(any[ChannelEvent])
{
// on connect send init
sendBytesToServer(0x05, 0x01, 0x00)
}
{
// when init response is received send connect request
receiveBytesFromServer(ch, Array[Byte](0x05, 0x00))
sendBytesToServer(0x05, 0x01, 0x00, 0x01, 0x7F, 0x00, 0x00, 0x01, portByte1, portByte2)
}
{
// when connect response is received, propagate the connect and remove the handler
receiveBytesFromServer(ch,
Array[Byte](0x05, 0x00, 0x00, 0x01, 0x7F, 0x00, 0x00, 0x01, portByte1, portByte2))
connectAndRemoveHandler(ch)
}
}
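  // Note on the byte sequences above (added for reference): they follow SOCKS5
  // (RFC 1928). 0x05 is the protocol version, followed by the number of offered
  // auth methods and the method ids (0x00 = no auth, 0x02 = username/password).
  // The connect request is: version, command (0x01 = CONNECT), reserved 0x00,
  // address type (0x01 = IPv4), four address bytes and a two-byte port.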
test("SocksConnectHandler should with no authentication propagate connection failure") {
val h = new SocksConnectHandlerHelper
import h._
val ch = new SocksConnectHandler(proxyAddress, remoteAddress)
ch.handleDownstream(ctx, connectRequested)
val ec = ArgumentCaptor.forClass(classOf[DownstreamChannelStateEvent])
verify(ctx).sendDownstream(ec.capture)
val e = ec.getValue
val exc = new Exception("failed to connect")
assert(!connectFuture.isDone)
e.getFuture.setFailure(exc)
assert(connectFuture.isDone)
assert(connectFuture.getCause === exc)
}
test("SocksConnectHandler should with username and password authentication when connect is successful do SOCKS negotiation") {
val h = new SocksConnectHandlerHelper
import h._
val username = "u"
val password = "pass"
val ch = new SocksConnectHandler(proxyAddress, remoteAddress,
Seq(UsernamePassAuthenticationSetting(username, password)))
ch.handleDownstream(ctx, connectRequested)
ch.handleUpstream(ctx, new UpstreamChannelStateEvent(
channel, ChannelState.CONNECTED, remoteAddress))
assert(!connectFuture.isDone)
verify(ctx, times(0)).sendUpstream(any[ChannelEvent])
{
// on connect send init
sendBytesToServer(0x05, 0x01, 0x02)
}
{
// when init response is received send user name and pass
receiveBytesFromServer(ch, Array[Byte](0x05, 0x02))
sendBytesToServer(0x01, 0x01, 0x75, 0x04, 0x70, 0x61, 0x73, 0x73)
}
{
// when authenticated response is received send connect request
receiveBytesFromServer(ch, Array[Byte](0x01, 0x00))
sendBytesToServer(0x05, 0x01, 0x00, 0x01, 0x7F, 0x00, 0x00, 0x01, portByte1, portByte2)
}
{
// when connect response is received, propagate the connect and remove the handler
receiveBytesFromServer(ch,
Array[Byte](0x05, 0x00, 0x00, 0x01, 0x7F, 0x00, 0x00, 0x01, portByte1, portByte2))
connectAndRemoveHandler(ch)
}
}
test("SocksConnectHandler should with username and password authentication when connect is successful fail SOCKS negotiation when not authenticated") {
val h = new SocksConnectHandlerHelper
import h._
val username = "u"
val password = "pass"
val ch = new SocksConnectHandler(proxyAddress, remoteAddress,
Seq(UsernamePassAuthenticationSetting(username, password)))
ch.handleDownstream(ctx, connectRequested)
ch.handleUpstream(ctx, new UpstreamChannelStateEvent(
channel, ChannelState.CONNECTED, remoteAddress))
assert(!connectFuture.isDone)
verify(ctx, times(0)).sendUpstream(any[ChannelEvent])
{
// on connect send init
sendBytesToServer(0x05, 0x01, 0x02)
}
{
// when init response is received send user name and pass
receiveBytesFromServer(ch, Array[Byte](0x05, 0x02))
sendBytesToServer(0x01, 0x01, 0x75, 0x04, 0x70, 0x61, 0x73, 0x73)
}
{
// when not authenticated response is received disconnect
receiveBytesFromServer(ch, Array[Byte](0x01, 0x01))
assert(connectFuture.isDone)
assert(connectFuture.getCause.isInstanceOf[ConnectionFailedException])
checkDidClose()
}
}
}
|
cogitate/twitter-finagle-uuid
|
finagle-core/src/test/scala/com/twitter/finagle/socks/SocksConnectHandlerTest.scala
|
Scala
|
apache-2.0
| 9,572 |
package com.orendainx.trucking.enrichment
import com.orendainx.trucking.commons.models.TruckEventTypes
import com.typesafe.config.{Config, ConfigFactory}
import scala.util.Random
/**
* @author Edgar Orendain <[email protected]>
*/
object WeatherAPI {
lazy val default = new WeatherAPI()
}
class WeatherAPI(config: Config) {
def this() = this(ConfigFactory.load())
private implicit val combinedConfig: Config = ConfigFactory.defaultOverrides()
.withFallback(config)
.withFallback(ConfigFactory.defaultReference())
.getConfig("trucking-enrichment.weatherapi")
/** Queries the weatherAPI for fog status.
*
* @param eventType The type of a driving event (e.g. "normal", "speeding", etc.)
    * @return 1 if the weather is foggy, 0 otherwise
*/
def getFog(eventType: String): Int =
if (eventType == TruckEventTypes.Normal) if (Random.nextInt(100) < combinedConfig.getInt("foggy.normal-chance")) 1 else 0
else if (Random.nextInt(100) < combinedConfig.getInt("foggy.anomalous-chance")) 1 else 0
/** Queries the weatherAPI for rain status.
*
* @param eventType The type of a driving event (e.g. "normal", "speeding", etc.)
    * @return 1 if the weather is rainy, 0 otherwise
*/
def getRain(eventType: String): Int =
if (eventType == TruckEventTypes.Normal) if (Random.nextInt(100) < combinedConfig.getInt("rainy.normal-chance")) 1 else 0
else if (Random.nextInt(100) < combinedConfig.getInt("rainy.anomalous-chance")) 1 else 0
/** Queries the weatherAPI for wind status.
*
* @param eventType The type of a driving event (e.g. "normal", "speeding", etc.)
    * @return 1 if the weather is windy, 0 otherwise
*/
def getWind(eventType: String): Int =
if (eventType == TruckEventTypes.Normal) if (Random.nextInt(100) < combinedConfig.getInt("windy.normal-chance")) 1 else 0
else if (Random.nextInt(100) < combinedConfig.getInt("windy.anomalous-chance")) 1 else 0
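  // Illustrative usage sketch (not from the original file); the event type is
  // hypothetical:
  //   val api = WeatherAPI.default
  //   val foggy = api.getFog("speeding") // 1 with the configured anomalous chance, else 0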
}
|
orendain/trucking-iot
|
enrichment/src/main/scala/com/orendainx/trucking/enrichment/WeatherAPI.scala
|
Scala
|
apache-2.0
| 1,980 |
package dotty.tools
package backend
package jvm
import scala.annotation.switch
import scala.tools.asm
import scala.tools.asm.{Handle, Label, Opcodes}
import BCodeHelpers.InvokeStyle
/*
*
* @author Miguel Garcia, http://lamp.epfl.ch/~magarcia/ScalaCompilerCornerReloaded/
* @version 1.0
*
*/
trait BCodeBodyBuilder extends BCodeSkelBuilder {
// import global._
// import definitions._
import int._
import bTypes._
import coreBTypes._
import BCodeBodyBuilder._
/*
* Functionality to build the body of ASM MethodNode, except for `synchronized` and `try` expressions.
*/
abstract class PlainBodyBuilder(cunit: CompilationUnit) extends PlainSkelBuilder(cunit) {
import Primitives.TestOp
/* ---------------- helper utils for generating methods and code ---------------- */
def emit(opc: Int): Unit = { mnode.visitInsn(opc) }
def emitZeroOf(tk: BType): Unit = {
tk match {
case BOOL => bc.boolconst(false)
case BYTE |
SHORT |
CHAR |
INT => bc.iconst(0)
case LONG => bc.lconst(0)
case FLOAT => bc.fconst(0)
case DOUBLE => bc.dconst(0)
case UNIT => ()
case _ => emit(asm.Opcodes.ACONST_NULL)
}
}
/*
* Emits code that adds nothing to the operand stack.
* Two main cases: `tree` is an assignment,
* otherwise an `adapt()` to UNIT is performed if needed.
*/
def genStat(tree: Tree): Unit = {
lineNumber(tree)
tree match {
case Assign(lhs @ Select(qual, _), rhs) =>
val isStatic = lhs.symbol.isStaticMember
if (!isStatic) { genLoadQualifier(lhs) }
genLoad(rhs, symInfoTK(lhs.symbol))
lineNumber(tree)
// receiverClass is used in the bytecode to access the field. using sym.owner may lead to IllegalAccessError
val receiverClass = qual.tpe.typeSymbol
fieldStore(lhs.symbol, receiverClass)
case Assign(lhs, rhs) =>
val s = lhs.symbol
val Local(tk, _, idx, _) = locals.getOrMakeLocal(s)
genLoad(rhs, tk)
lineNumber(tree)
bc.store(idx, tk)
case _ =>
genLoad(tree, UNIT)
}
}
def genThrow(expr: Tree): BType = {
val thrownKind = tpeTK(expr)
      // `throw null` is valid although scala.Null (as defined in src/library-aux) isn't a subtype of Throwable.
      // Similarly for scala.Nothing (again, as defined in src/library-aux).
assert(thrownKind.isNullType || thrownKind.isNothingType || thrownKind.asClassBType.isSubtypeOf(ThrowableReference))
genLoad(expr, thrownKind)
lineNumber(expr)
emit(asm.Opcodes.ATHROW) // ICode enters here into enterIgnoreMode, we'll rely instead on DCE at ClassNode level.
RT_NOTHING // always returns the same, the invoker should know :)
}
/* Generate code for primitive arithmetic operations. */
def genArithmeticOp(tree: Tree, code: Int): BType = tree match{
case Apply(fun @ Select(larg, _), args) =>
var resKind = tpeTK(larg)
assert(resKind.isNumericType || (resKind == BOOL),
s"$resKind is not a numeric or boolean type [operation: ${fun.symbol}]")
import ScalaPrimitivesOps._
args match {
// unary operation
case Nil =>
genLoad(larg, resKind)
code match {
case POS => () // nothing
case NEG => bc.neg(resKind)
case NOT => bc.genPrimitiveArithmetic(Primitives.NOT, resKind)
case _ => abort(s"Unknown unary operation: ${fun.symbol.showFullName} code: $code")
}
// binary operation
case rarg :: Nil =>
val isShift = isShiftOp(code)
resKind = tpeTK(larg).maxType(if (isShift) INT else tpeTK(rarg))
if (isShift || isBitwiseOp(code)) {
assert(resKind.isIntegralType || (resKind == BOOL),
s"$resKind incompatible with arithmetic modulo operation.")
}
genLoad(larg, resKind)
genLoad(rarg, if (isShift) INT else resKind)
(code: @switch) match {
case ADD => bc add resKind
case SUB => bc sub resKind
case MUL => bc mul resKind
case DIV => bc div resKind
case MOD => bc rem resKind
case OR | XOR | AND => bc.genPrimitiveLogical(code, resKind)
case LSL | LSR | ASR => bc.genPrimitiveShift(code, resKind)
case _ => abort(s"Unknown primitive: ${fun.symbol}[$code]")
}
case _ =>
abort(s"Too many arguments for primitive function: $tree")
}
lineNumber(tree)
resKind
}
/* Generate primitive array operations. */
def genArrayOp(tree: Tree, code: Int, expectedType: BType): BType = tree match{
case Apply(Select(arrayObj, _), args) =>
import ScalaPrimitivesOps._
val k = tpeTK(arrayObj)
genLoad(arrayObj, k)
val elementType = typeOfArrayOp.getOrElse[bTypes.BType](code, abort(s"Unknown operation on arrays: $tree code: $code"))
var generatedType = expectedType
if (isArrayGet(code)) {
// load argument on stack
assert(args.length == 1, s"Too many arguments for array get operation: $tree");
genLoad(args.head, INT)
generatedType = k.asArrayBType.componentType
bc.aload(elementType)
}
else if (isArraySet(code)) {
val List(a1, a2) = args
genLoad(a1, INT)
genLoad(a2)
generatedType = UNIT
bc.astore(elementType)
} else {
generatedType = INT
emit(asm.Opcodes.ARRAYLENGTH)
}
lineNumber(tree)
generatedType
}
def genLoadIf(tree: If, expectedType: BType): BType = tree match{
case If(condp, thenp, elsep) =>
val success = new asm.Label
val failure = new asm.Label
val hasElse = !elsep.isEmpty && (elsep match {
case Literal(value) if value.tag == UnitTag => false
case _ => true
})
val postIf = if (hasElse) new asm.Label else failure
genCond(condp, success, failure, targetIfNoJump = success)
markProgramPoint(success)
val thenKind = tpeTK(thenp)
val elseKind = if (!hasElse) UNIT else tpeTK(elsep)
def hasUnitBranch = (thenKind == UNIT || elseKind == UNIT) && expectedType == UNIT
val resKind = if (hasUnitBranch) UNIT else tpeTK(tree)
genLoad(thenp, resKind)
if (hasElse) { bc goTo postIf }
markProgramPoint(failure)
if (hasElse) {
genLoad(elsep, resKind)
markProgramPoint(postIf)
}
resKind
}
def genPrimitiveOp(tree: Apply, expectedType: BType): BType = tree match {
case Apply(fun @ Select(receiver, _), _) =>
val sym = tree.symbol
val code = primitives.getPrimitive(tree, receiver.tpe)
import ScalaPrimitivesOps._
if (isArithmeticOp(code)) genArithmeticOp(tree, code)
else if (code == CONCAT) genStringConcat(tree)
else if (code == HASH) genScalaHash(receiver)
else if (isArrayOp(code)) genArrayOp(tree, code, expectedType)
else if (isLogicalOp(code) || isComparisonOp(code)) {
val success, failure, after = new asm.Label
genCond(tree, success, failure, targetIfNoJump = success)
// success block
markProgramPoint(success)
bc boolconst true
bc goTo after
// failure block
markProgramPoint(failure)
bc boolconst false
// after
markProgramPoint(after)
BOOL
}
else if (isCoercion(code)) {
genLoad(receiver)
lineNumber(tree)
genCoercion(code)
coercionTo(code)
}
else abort(
s"Primitive operation not handled yet: ${sym.showFullName}(${fun.symbol.name}) at: ${tree.pos}"
)
}
def genLoad(tree: Tree): Unit = {
genLoad(tree, tpeTK(tree))
}
/* Generate code for trees that produce values on the stack */
def genLoad(tree: Tree, expectedType: BType): Unit = {
var generatedType = expectedType
lineNumber(tree)
tree match {
case lblDf @ LabelDef(_, _, _) => genLabelDef(lblDf, expectedType)
case ValDef(_, `nme_THIS`, _, _) =>
debuglog("skipping trivial assign to _$this: " + tree)
case ValDef(_, _, _, rhs) =>
val sym = tree.symbol
/* most of the time, !locals.contains(sym), unless the current activation of genLoad() is being called
while duplicating a finalizer that contains this ValDef. */
val loc = locals.getOrMakeLocal(sym)
val Local(tk, _, idx, isSynth) = loc
if (rhs == EmptyTree) { emitZeroOf(tk) }
else { genLoad(rhs, tk) }
val localVarStart = currProgramPoint()
bc.store(idx, tk)
if (!isSynth) { // there are case <synthetic> ValDef's emitted by patmat
varsInScope ::= (sym -> localVarStart)
}
generatedType = UNIT
case t @ If(_, _, _) =>
generatedType = genLoadIf(t, expectedType)
case t @ Labeled(_, _) =>
generatedType = genLabeled(t)
case r @ Return(_) =>
genReturn(r)
generatedType = expectedType
case t @ WhileDo(_, _) =>
generatedType = genWhileDo(t)
case t @ Try(_, _, _) =>
generatedType = genLoadTry(t)
case Throw(expr) =>
generatedType = genThrow(expr)
case New(tpt) =>
abort(s"Unexpected New(${tpt.summaryString}/$tpt) reached GenBCode.\\n" +
" Call was genLoad" + ((tree, expectedType)))
case app @ Closure(env, call, functionalInterface) =>
val (fun, args) = call match {
case Apply(fun, args) => (fun, args)
case t @ Select(_, _) => (t, Nil)
case t @ Ident(_) => (t, Nil)
}
if (!fun.symbol.isStaticMember) {
// load receiver of non-static implementation of lambda
            // darkdimius: I haven't found in the spec where the `this` reference should go,
            // but I was able to derive it by reading
// AbstractValidatingLambdaMetafactory.validateMetafactoryArgs
val Select(prefix, _) = fun
genLoad(prefix)
}
genLoadArguments(env, fun.symbol.info.paramTypes map toTypeKind)
generatedType = genInvokeDynamicLambda(NoSymbol, fun.symbol, env.size, functionalInterface)
case app @ Apply(_, _) =>
generatedType = genApply(app, expectedType)
case ApplyDynamic(qual, args) => sys.error("No invokedynamic support yet.")
case This(qual) =>
val symIsModuleClass = tree.symbol.isModuleClass
assert(tree.symbol == claszSymbol || symIsModuleClass,
s"Trying to access the this of another class: tree.symbol = ${tree.symbol}, class symbol = $claszSymbol compilation unit: $cunit")
if (symIsModuleClass && tree.symbol != claszSymbol) {
generatedType = genLoadModule(tree)
}
else {
mnode.visitVarInsn(asm.Opcodes.ALOAD, 0)
generatedType =
if (tree.symbol == ArrayClass) ObjectReference
else classBTypeFromSymbol(claszSymbol)
}
case Select(Ident(`nme_EMPTY_PACKAGE_NAME`), module) =>
assert(tree.symbol.isModule, s"Selection of non-module from empty package: $tree sym: ${tree.symbol} at: ${tree.pos}")
genLoadModule(tree)
case Select(qualifier, _) =>
val sym = tree.symbol
generatedType = symInfoTK(sym)
val qualSafeToElide = isQualifierSafeToElide(qualifier)
def genLoadQualUnlessElidable(): Unit = { if (!qualSafeToElide) { genLoadQualifier(tree) } }
// receiverClass is used in the bytecode to access the field. using sym.owner may lead to IllegalAccessError
def receiverClass = qualifier.tpe.typeSymbol
if (sym.isModule) {
genLoadQualUnlessElidable()
genLoadModule(tree)
} else if (sym.isStaticMember) {
genLoadQualUnlessElidable()
fieldLoad(sym, receiverClass)
} else {
genLoadQualifier(tree)
fieldLoad(sym, receiverClass)
}
case t @ Ident(name) =>
val sym = tree.symbol
val tk = symInfoTK(sym)
generatedType = tk
val desugared = desugarIdent(t)
desugared match {
case None =>
if (!sym.hasPackageFlag) {
if (sym.isModule) genLoadModule(sym)
else locals.load(sym)
}
case Some(t) =>
genLoad(t, generatedType)
}
case Literal(value) =>
if (value.tag != UnitTag) (value.tag, expectedType) match {
case (IntTag, LONG ) => bc.lconst(value.longValue); generatedType = LONG
case (FloatTag, DOUBLE) => bc.dconst(value.doubleValue); generatedType = DOUBLE
case (NullTag, _ ) => bc.emit(asm.Opcodes.ACONST_NULL); generatedType = RT_NULL
case _ => genConstant(value); generatedType = tpeTK(tree)
}
case blck @ Block(stats, expr) =>
if(stats.isEmpty)
genLoad(expr, expectedType)
else genBlock(blck, expectedType)
case Typed(Super(_, _), _) => genLoad(This(claszSymbol), expectedType)
case Typed(expr, _) => genLoad(expr, expectedType)
case Assign(_, _) =>
generatedType = UNIT
genStat(tree)
case av @ ArrayValue(_, _) =>
generatedType = genArrayValue(av)
case mtch @ Match(_, _) =>
generatedType = genMatch(mtch)
case EmptyTree => if (expectedType != UNIT) { emitZeroOf(expectedType) }
case t: TypeApply => // dotty specific
generatedType = genTypeApply(t)
case _ => abort(s"Unexpected tree in genLoad: $tree/${tree.getClass} at: ${tree.pos}")
}
// emit conversion
if (generatedType != expectedType) {
adapt(generatedType, expectedType)
}
} // end of GenBCode.genLoad()
// ---------------- field load and store ----------------
/*
* must-single-thread
*/
def fieldLoad( field: Symbol, hostClass: Symbol = null): Unit = fieldOp(field, isLoad = true, hostClass)
/*
* must-single-thread
*/
def fieldStore(field: Symbol, hostClass: Symbol = null): Unit = fieldOp(field, isLoad = false, hostClass)
/*
* must-single-thread
*/
private def fieldOp(field: Symbol, isLoad: Boolean, specificReceiver: Symbol): Unit = {
val useSpecificReceiver = specificReceiver != null && !field.isScalaStatic
val owner = internalName(if (useSpecificReceiver) specificReceiver else field.owner)
val fieldJName = field.javaSimpleName.toString
val fieldDescr = symInfoTK(field).descriptor
val isStatic = field.isStaticMember
val opc =
if (isLoad) { if (isStatic) asm.Opcodes.GETSTATIC else asm.Opcodes.GETFIELD }
else { if (isStatic) asm.Opcodes.PUTSTATIC else asm.Opcodes.PUTFIELD }
mnode.visitFieldInsn(opc, owner, fieldJName, fieldDescr)
}
// ---------------- emitting constant values ----------------
/*
* For const.tag in {ClazzTag, EnumTag}
* must-single-thread
* Otherwise it's safe to call from multiple threads.
*/
def genConstant(const: Constant): Unit = {
(const.tag/*: @switch*/) match {
case BooleanTag => bc.boolconst(const.booleanValue)
case ByteTag => bc.iconst(const.byteValue)
case ShortTag => bc.iconst(const.shortValue)
case CharTag => bc.iconst(const.charValue)
case IntTag => bc.iconst(const.intValue)
case LongTag => bc.lconst(const.longValue)
case FloatTag => bc.fconst(const.floatValue)
case DoubleTag => bc.dconst(const.doubleValue)
case UnitTag => ()
case StringTag =>
assert(const.value != null, const) // TODO this invariant isn't documented in `case class Constant`
mnode.visitLdcInsn(const.stringValue) // `stringValue` special-cases null, but not for a const with StringTag
case NullTag => emit(asm.Opcodes.ACONST_NULL)
case ClazzTag =>
val toPush: BType = {
toTypeKind(const.typeValue) match {
case kind: PrimitiveBType => boxedClassOfPrimitive(kind)
case kind => kind
}
}
mnode.visitLdcInsn(toPush.toASMType)
case EnumTag =>
val sym = const.symbolValue
val ownerName = internalName(sym.owner)
val fieldName = sym.javaSimpleName.toString
val fieldDesc = toTypeKind(sym.tpe.underlying).descriptor
mnode.visitFieldInsn(
asm.Opcodes.GETSTATIC,
ownerName,
fieldName,
fieldDesc
)
case _ => abort(s"Unknown constant value: $const")
}
}
private def genLabelDef(lblDf: LabelDef, expectedType: BType): Unit = lblDf match {
case LabelDef(_, _, rhs) =>
assert(int.hasLabelDefs) // scalac
// duplication of LabelDefs contained in `finally`-clauses is handled when emitting RETURN. No bookkeeping for that required here.
// no need to call index() over lblDf.params, on first access that magic happens (moreover, no LocalVariableTable entries needed for them).
markProgramPoint(programPoint(lblDf.symbol))
lineNumber(lblDf)
genLoad(rhs, expectedType)
}
private def genLabeled(tree: Labeled): BType = tree match {
case Labeled(bind, expr) =>
val resKind = tpeTK(tree)
genLoad(expr, resKind)
markProgramPoint(programPoint(bind.symbol))
resKind
}
private def genReturn(r: Return): Unit = r match {
case Return(expr, fromSym) =>
if (NoSymbol == fromSym) {
// return from enclosing method
val returnedKind = tpeTK(expr)
genLoad(expr, returnedKind)
adapt(returnedKind, returnType)
val saveReturnValue = (returnType != UNIT)
lineNumber(r)
cleanups match {
case Nil =>
            // not an assertion: !shouldEmitCleanup (at least not yet, pendingCleanups() may still have to run, and reset `shouldEmitCleanup`).
bc emitRETURN returnType
case nextCleanup :: rest =>
if (saveReturnValue) {
// regarding return value, the protocol is: in place of a `return-stmt`, a sequence of `adapt, store, jump` are inserted.
if (earlyReturnVar == null) {
earlyReturnVar = locals.makeLocal(returnType, "earlyReturnVar", expr.tpe, expr.pos)
}
locals.store(earlyReturnVar)
}
bc goTo nextCleanup
shouldEmitCleanup = true
}
} else {
// return from labeled
assert(fromSym.isLabel, fromSym)
assert(!fromSym.isMethod, fromSym)
/* TODO At the moment, we disregard cleanups, because by construction we don't have return-from-labels
* that cross cleanup boundaries. However, in theory such crossings are valid, so we should take care
* of them.
*/
val resultKind = toTypeKind(fromSym.info)
genLoad(expr, resultKind)
lineNumber(r)
bc goTo programPoint(fromSym)
}
} // end of genReturn()
def genWhileDo(tree: WhileDo): BType = tree match{
case WhileDo(cond, body) =>
val isInfinite = cond == EmptyTree
val loop = new asm.Label
markProgramPoint(loop)
if (isInfinite) {
genLoad(body, UNIT)
bc goTo loop
RT_NOTHING
} else {
val hasBody = cond match {
case Literal(value) if value.tag == UnitTag => false
case _ => true
}
if (hasBody) {
val success = new asm.Label
val failure = new asm.Label
genCond(cond, success, failure, targetIfNoJump = success)
markProgramPoint(success)
genLoad(body, UNIT)
bc goTo loop
markProgramPoint(failure)
} else {
// this is the shape of do..while loops, so do something smart about them
val failure = new asm.Label
genCond(cond, loop, failure, targetIfNoJump = failure)
markProgramPoint(failure)
}
UNIT
}
}
def genTypeApply(t: TypeApply): BType = t match {
case TypeApply(fun@Select(obj, _), targs) =>
val sym = fun.symbol
val cast = sym match {
case Object_isInstanceOf => false
case Object_asInstanceOf => true
case _ => abort(s"Unexpected type application $fun[sym: ${sym.showFullName}] in: $t")
}
val l = tpeTK(obj)
val r = tpeTK(targs.head)
genLoadQualifier(fun)
// TODO @lry make pattern match
if (l.isPrimitive && r.isPrimitive)
genConversion(l, r, cast)
else if (l.isPrimitive) {
bc drop l
if (cast) {
mnode.visitTypeInsn(asm.Opcodes.NEW, classCastExceptionReference.internalName)
bc dup ObjectReference
emit(asm.Opcodes.ATHROW)
} else {
bc boolconst false
}
}
else if (r.isPrimitive && cast) {
abort(s"Erasure should have added an unboxing operation to prevent this cast. Tree: $t")
}
else if (r.isPrimitive) {
bc isInstance boxedClassOfPrimitive(r.asPrimitiveBType)
}
else {
assert(r.isRef, r) // ensure that it's not a method
genCast(r.asRefBType, cast)
}
if (cast) r else BOOL
} // end of genTypeApply()
private def mkArrayConstructorCall(arr: ArrayBType, app: Apply, args: List[Tree]) = {
val dims = arr.dimension
var elemKind = arr.elementType
val argsSize = args.length
if (argsSize > dims) {
error(app.pos, s"too many arguments for array constructor: found ${args.length} but array has only $dims dimension(s)")
}
if (argsSize < dims) {
/* In one step:
* elemKind = new BType(BType.ARRAY, arr.off + argsSize, arr.len - argsSize)
         * however the above does not enter a TypeName for each nested array in chrs.
*/
for (i <- args.length until dims) elemKind = ArrayBType(elemKind)
}
genLoadArguments(args, List.fill(args.size)(INT))
(argsSize /*: @switch*/) match {
case 1 => bc newarray elemKind
case _ =>
val descr = ("[" * argsSize) + elemKind.descriptor // denotes the same as: arrayN(elemKind, argsSize).descriptor
mnode.visitMultiANewArrayInsn(descr, argsSize)
}
}
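    /* Illustrative sketch (not part of the original source):
     *   new Array[Int](3)  ->  ICONST_3; NEWARRAY T_INT
     * whereas an allocation that supplies two dimensions of an Array[Array[Int]] loads both
     * sizes and then emits MULTIANEWARRAY "[[I" 2. In other words, a single supplied dimension
     * goes through `bc newarray`, anything more uses MULTIANEWARRAY with the array descriptor
     * and the number of supplied dimensions.
     */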
private def genApply(app: Apply, expectedType: BType): BType = {
var generatedType = expectedType
lineNumber(app)
app match {
case Apply(_, args) if isSyntheticArrayConstructor(app.symbol) =>
val List(elemClaz, Literal(c: Constant), ArrayValue(_, dims)) = args
generatedType = toTypeKind(c.typeValue)
mkArrayConstructorCall(generatedType.asArrayBType, app, dims)
case Apply(t :TypeApply, _) =>
generatedType =
if (t.symbol ne Object_synchronized) genTypeApply(t)
else genSynchronized(app, expectedType)
case Apply(fun @ Select(Super(_, _), _), args) =>
def initModule(): Unit = {
// we initialize the MODULE$ field immediately after the super ctor
if (!isModuleInitialized &&
jMethodName == INSTANCE_CONSTRUCTOR_NAME &&
fun.symbol.javaSimpleName.toString == INSTANCE_CONSTRUCTOR_NAME &&
claszSymbol.isStaticModuleClass) {
isModuleInitialized = true
mnode.visitVarInsn(asm.Opcodes.ALOAD, 0)
mnode.visitFieldInsn(
asm.Opcodes.PUTSTATIC,
thisName,
MODULE_INSTANCE_FIELD,
"L" + thisName + ";"
)
}
}
// 'super' call: Note: since constructors are supposed to
// return an instance of what they construct, we have to take
          // special care. On the JVM they are 'void', and Scala forbids (syntactically)
          // calling super constructors explicitly and/or using their 'returned' value.
          // Therefore, we can ignore this fact and generate code that leaves nothing
// on the stack (contrary to what the type in the AST says).
mnode.visitVarInsn(asm.Opcodes.ALOAD, 0)
genLoadArguments(args, paramTKs(app))
generatedType = genCallMethod(fun.symbol, InvokeStyle.Super, app.pos)
initModule()
// 'new' constructor call: Note: since constructors are
// thought to return an instance of what they construct,
// we have to 'simulate' it by DUPlicating the freshly created
// instance (on JVM, <init> methods return VOID).
case Apply(fun @ Select(New(tpt), `nme_CONSTRUCTOR`), args) =>
val ctor = fun.symbol
assert(ctor.isClassConstructor, s"'new' call to non-constructor: ${ctor.name}")
generatedType = toTypeKind(tpt)
assert(generatedType.isRef, s"Non reference type cannot be instantiated: $generatedType")
generatedType match {
case arr: ArrayBType =>
mkArrayConstructorCall(arr, app, args)
case rt: ClassBType =>
assert(classBTypeFromSymbol(ctor.owner) == rt, s"Symbol ${ctor.owner.showFullName} is different from $rt")
mnode.visitTypeInsn(asm.Opcodes.NEW, rt.internalName)
bc dup generatedType
genLoadArguments(args, paramTKs(app))
genCallMethod(ctor, InvokeStyle.Special, app.pos)
case _ =>
abort(s"Cannot instantiate $tpt of kind: $generatedType")
}
case Apply(fun, List(expr)) if isBox(fun.symbol) =>
val nativeKind = tpeTK(expr)
genLoad(expr, nativeKind)
val MethodNameAndType(mname, methodType) = asmBoxTo(nativeKind)
bc.invokestatic(BoxesRunTime.internalName, mname, methodType.descriptor, itf = false)
generatedType = boxResultType(fun.symbol) // was toTypeKind(fun.symbol.tpe.resultType)
case Apply(fun, List(expr)) if isUnbox(fun.symbol) =>
genLoad(expr)
val boxType = unboxResultType(fun.symbol) // was toTypeKind(fun.symbol.owner.linkedClassOfClass.tpe)
generatedType = boxType
val MethodNameAndType(mname, methodType) = asmUnboxTo(boxType)
bc.invokestatic(BoxesRunTime.internalName, mname, methodType.descriptor, itf = false)
case app @ Apply(fun, args) =>
val sym = fun.symbol
if (sym.isLabel) { // jump to a label
assert(int.hasLabelDefs)
genLoadLabelArguments(args, labelDef(sym), app.pos)
bc goTo programPoint(sym)
} else if (isPrimitive(fun)) { // primitive method call
generatedType = genPrimitiveOp(app, expectedType)
} else { // normal method call
val invokeStyle =
if (sym.isStaticMember) InvokeStyle.Static
else if (sym.isPrivate || sym.isClassConstructor) InvokeStyle.Special
else InvokeStyle.Virtual
if (invokeStyle.hasInstance) genLoadQualifier(fun)
genLoadArguments(args, paramTKs(app))
val Select(qual, _) = fun // fun is a Select, also checked in genLoadQualifier
if (isArrayClone(fun)) {
// Special-case Array.clone, introduced in 36ef60e. The goal is to generate this call
// as "[I.clone" instead of "java/lang/Object.clone". This is consistent with javac.
// Arrays have a public method `clone` (jls 10.7).
//
// The JVMS is not explicit about this, but that receiver type can be an array type
// descriptor (instead of a class internal name):
// invokevirtual #2; //Method "[I".clone:()Ljava/lang/Object
//
// Note that using `Object.clone()` would work as well, but only because the JVM
// relaxes protected access specifically if the receiver is an array:
// http://hg.openjdk.java.net/jdk8/jdk8/hotspot/file/87ee5ee27509/src/share/vm/interpreter/linkResolver.cpp#l439
// Example: `class C { override def clone(): Object = "hi" }`
// Emitting `def f(c: C) = c.clone()` as `Object.clone()` gives a VerifyError.
val target: String = tpeTK(qual).asRefBType.classOrArrayType
val methodBType = asmMethodType(sym)
bc.invokevirtual(target, sym.javaSimpleName.toString, methodBType.descriptor)
generatedType = methodBType.returnType
} else {
val receiverClass = if (!invokeStyle.isVirtual) null else {
              // receiverClass is used in the bytecode as the method receiver. Using sym.owner
// may lead to IllegalAccessErrors, see 9954eaf / aladdin bug 455.
val qualSym = qual.tpe.typeSymbol
if (qualSym == ArrayClass) {
// For invocations like `Array(1).hashCode` or `.wait()`, use Object as receiver
// in the bytecode. Using the array descriptor (like we do for clone above) seems
// to work as well, but it seems safer not to change this. Javac also uses Object.
// Note that array apply/update/length are handled by isPrimitive (above).
assert(sym.owner == ObjectClass, s"unexpected array call: $app")
ObjectClass
} else qualSym
}
generatedType = genCallMethod(sym, invokeStyle, app.pos, receiverClass)
}
}
}
generatedType
} // end of genApply()
private def genArrayValue(av: ArrayValue): BType = av match {
case ArrayValue(tpt, elems) =>
lineNumber(av)
genArray(elems, tpt)
}
private def genArray(elems: List[Tree], elemType: Type): BType = {
val elmKind = toTypeKind(elemType)
val generatedType = ArrayBType(elmKind)
bc iconst elems.length
bc newarray elmKind
var i = 0
var rest = elems
while (!rest.isEmpty) {
bc dup generatedType
bc iconst i
genLoad(rest.head, elmKind)
bc astore elmKind
rest = rest.tail
i = i + 1
}
generatedType
}
/*
* A Match node contains one or more case clauses,
* each case clause lists one or more Int values to use as keys, and a code block.
* Except the "default" case clause which (if it exists) doesn't list any Int key.
*
* On a first pass over the case clauses, we flatten the keys and their targets (the latter represented with asm.Labels).
* That representation allows JCodeMethodV to emit a lookupswitch or a tableswitch.
*
* On a second pass, we emit the switch blocks, one for each different target.
*/
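    /* Illustrative sketch (not part of the original source): for a selector matched as
     *
     *   x match {
     *     case 1 | 2 => a
     *     case 3     => b
     *     case _     => c
     *   }
     *
     * the first pass collects the keys 1, 2, 3 into `flatKeys`, records the label of a's block
     * twice and b's block once in `targets`, and points `default` at c's block; the second pass
     * then emits each switch block followed by a jump to `postMatch`.
     */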
private def genMatch(tree: Match): BType = tree match {
case Match(selector, cases) =>
lineNumber(tree)
genLoad(selector, INT)
val generatedType = tpeTK(tree)
var flatKeys: List[Int] = Nil
var targets: List[asm.Label] = Nil
var default: asm.Label = null
var switchBlocks: List[(asm.Label, Tree)] = Nil
// collect switch blocks and their keys, but don't emit yet any switch-block.
for (caze @ CaseDef(pat, guard, body) <- cases) {
assert(guard == EmptyTree, guard)
val switchBlockPoint = new asm.Label
switchBlocks ::= (switchBlockPoint, body)
pat match {
case Literal(value) =>
flatKeys ::= value.intValue
targets ::= switchBlockPoint
case Ident(`nme_WILDCARD`) =>
assert(default == null, s"multiple default targets in a Match node, at ${tree.pos}")
default = switchBlockPoint
case Alternative(alts) =>
alts foreach {
case Literal(value) =>
flatKeys ::= value.intValue
targets ::= switchBlockPoint
case _ =>
abort(s"Invalid alternative in alternative pattern in Match node: $tree at: ${tree.pos}")
}
case _ =>
abort(s"Invalid pattern in Match node: $tree at: ${tree.pos}")
}
}
bc.emitSWITCH(mkArrayReverse(flatKeys), mkArrayL(targets.reverse), default, MIN_SWITCH_DENSITY)
// emit switch-blocks.
val postMatch = new asm.Label
for (sb <- switchBlocks.reverse) {
val (caseLabel, caseBody) = sb
markProgramPoint(caseLabel)
genLoad(caseBody, generatedType)
bc goTo postMatch
}
markProgramPoint(postMatch)
generatedType
}
def genBlock(tree: Block, expectedType: BType) = tree match {
case Block(stats, expr) =>
val savedScope = varsInScope
varsInScope = Nil
stats foreach genStat
genLoad(expr, expectedType)
val end = currProgramPoint()
if (emitVars) {
// add entries to LocalVariableTable JVM attribute
for ((sym, start) <- varsInScope.reverse) {
emitLocalVarScope(sym, start, end)
}
}
varsInScope = savedScope
}
def adapt(from: BType, to: BType): Unit = {
if (!from.conformsTo(to)) {
to match {
case UNIT => bc drop from
case _ => bc.emitT2T(from, to)
}
} else if (from.isNothingType) {
      /* There are two possibilities for from.isNothingType: emitting a "throw e" expression and
* loading a (phantom) value of type Nothing.
*
* The Nothing type in Scala's type system does not exist in the JVM. In bytecode, Nothing
* is mapped to scala.runtime.Nothing$. To the JVM, a call to Predef.??? looks like it would
* return an object of type Nothing$. We need to do something with that phantom object on
* the stack. "Phantom" because it never exists: such methods always throw, but the JVM does
* not know that.
*
* Note: The two verifiers (old: type inference, new: type checking) have different
* requirements. Very briefly:
*
* Old (http://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.10.2.1): at
* each program point, no matter what branches were taken to get there
* - Stack is same size and has same typed values
* - Local and stack values need to have consistent types
* - In practice, the old verifier seems to ignore unreachable code and accept any
* instructions after an ATHROW. For example, there can be another ATHROW (without
* loading another throwable first).
*
* New (http://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.10.1)
* - Requires consistent stack map frames. GenBCode generates stack frames if -target:jvm-1.6
* or higher.
* - In practice: the ASM library computes stack map frames for us (ClassWriter). Emitting
* correct frames after an ATHROW is probably complex, so ASM uses the following strategy:
* - Every time when generating an ATHROW, a new basic block is started.
* - During classfile writing, such basic blocks are found to be dead: no branches go there
* - Eliminating dead code would probably require complex shifts in the output byte buffer
       *   - But there's an easy solution: replace all code in the dead block with
* `nop; nop; ... nop; athrow`, making sure the bytecode size stays the same
       *   - The corresponding stack frame can be easily generated: on entering the dead block,
* the frame requires a single Throwable on the stack.
* - Since there are no branches to the dead block, the frame requirements are never violated.
*
* To summarize the above: it does matter what we emit after an ATHROW.
*
* NOW: if we end up here because we emitted a load of a (phantom) value of type Nothing$,
* there was no ATHROW emitted. So, we have to make the verifier happy and do something
* with that value. Since Nothing$ extends Throwable, the easiest is to just emit an ATHROW.
*
* If we ended up here because we generated a "throw e" expression, we know the last
* emitted instruction was an ATHROW. As explained above, it is OK to emit a second ATHROW,
* the verifiers will be happy.
*/
if (lastInsn.getOpcode != asm.Opcodes.ATHROW)
emit(asm.Opcodes.ATHROW)
} else if (from.isNullType) {
/* After loading an expression of type `scala.runtime.Null$`, introduce POP; ACONST_NULL.
* This is required to pass the verifier: in Scala's type system, Null conforms to any
* reference type. In bytecode, the type Null is represented by scala.runtime.Null$, which
* is not a subtype of all reference types. Example:
*
* def nl: Null = null // in bytecode, nl has return type scala.runtime.Null$
* val a: String = nl // OK for Scala but not for the JVM, scala.runtime.Null$ does not conform to String
*
* In order to fix the above problem, the value returned by nl is dropped and ACONST_NULL is
* inserted instead - after all, an expression of type scala.runtime.Null$ can only be null.
*/
if (lastInsn.getOpcode != asm.Opcodes.ACONST_NULL) {
bc drop from
emit(asm.Opcodes.ACONST_NULL)
}
}
else (from, to) match {
case (BYTE, LONG) | (SHORT, LONG) | (CHAR, LONG) | (INT, LONG) => bc.emitT2T(INT, LONG)
case _ => ()
}
}
/* Emit code to Load the qualifier of `tree` on top of the stack. */
def genLoadQualifier(tree: Tree): Unit = {
lineNumber(tree)
tree match {
case Select(qualifier, _) => genLoad(qualifier)
case t: Ident => // dotty specific
desugarIdent(t) match {
case Some(sel) => genLoadQualifier(sel)
case None =>
assert(t.symbol.owner == this.claszSymbol)
}
case _ => abort(s"Unknown qualifier $tree")
}
}
/* Generate code that loads args into label parameters. */
def genLoadLabelArguments(args: List[Tree], lblDef: LabelDef, gotoPos: Position) = lblDef match {
case LabelDef(_, param, _) =>
val aps = {
val params: List[Symbol] = param
assert(args.length == params.length, s"Wrong number of arguments in call to label at: $gotoPos")
def isTrivial(kv: (Tree, Symbol)) = kv match {
case (This(_), p) if p.name == nme_THIS => true
case (arg @ Ident(_), p) if arg.symbol == p => true
case _ => false
}
(args zip params) filterNot isTrivial
}
// first push *all* arguments. This makes sure multiple uses of the same labelDef-var will all denote the (previous) value.
aps foreach { case (arg, param) => genLoad(arg, locals(param).tk) } // `locals` is known to contain `param` because `genDefDef()` visited `labelDefsAtOrUnder`
// second assign one by one to the LabelDef's variables.
aps.reverse foreach {
case (_, param) =>
// TODO FIXME a "this" param results from tail-call xform. If so, the `else` branch seems perfectly fine. And the `then` branch must be wrong.
if (param.name == nme_THIS) mnode.visitVarInsn(asm.Opcodes.ASTORE, 0)
else locals.store(param)
}
}
def genLoadArguments(args: List[Tree], btpes: List[BType]): Unit = {
(args zip btpes) foreach { case (arg, btpe) => genLoad(arg, btpe) }
}
def genLoadModule(tree: Tree): BType = {
val module = (
if (!tree.symbol.isPackageClass) tree.symbol
else tree.symbol.info.member(nme_PACKAGE) match {
case NoSymbol => abort(s"SI-5604: Cannot use package as value: $tree")
case s => abort(s"SI-5604: found package class where package object expected: $tree")
}
)
lineNumber(tree)
genLoadModule(module)
symInfoTK(module)
}
def genLoadModule(module: Symbol): Unit = {
def inStaticMethod = methSymbol != null && methSymbol.isStaticMember
if (claszSymbol == module.moduleClass && jMethodName != "readResolve" && !inStaticMethod) {
mnode.visitVarInsn(asm.Opcodes.ALOAD, 0)
} else {
val mbt = symInfoTK(module).asClassBType
mnode.visitFieldInsn(
asm.Opcodes.GETSTATIC,
mbt.internalName /* + "$" */ ,
MODULE_INSTANCE_FIELD,
mbt.descriptor // for nostalgics: toTypeKind(module.tpe).descriptor
)
}
}
def genConversion(from: BType, to: BType, cast: Boolean): Unit = {
if (cast) { bc.emitT2T(from, to) }
else {
bc drop from
bc boolconst (from == to)
}
}
def genCast(to: RefBType, cast: Boolean): Unit = {
if (cast) { bc checkCast to }
else { bc isInstance to }
}
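    /* Illustrative sketch (not part of the original source):
     *   x.asInstanceOf[String]  ->  CHECKCAST  java/lang/String   (cast = true)
     *   x.isInstanceOf[String]  ->  INSTANCEOF java/lang/String   (cast = false)
     */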
/* Is the given symbol a primitive operation? */
def isPrimitive(fun: Tree): Boolean = {
primitives.isPrimitive(fun)
}
/* Generate coercion denoted by "code" */
def genCoercion(code: Int): Unit = {
import ScalaPrimitivesOps._
(code: @switch) match {
case B2B | S2S | C2C | I2I | L2L | F2F | D2D => ()
case _ =>
val from = coercionFrom(code)
val to = coercionTo(code)
bc.emitT2T(from, to)
}
}
def genStringConcat(tree: Tree): BType = {
lineNumber(tree)
liftStringConcat(tree) match {
// Optimization for expressions of the form "" + x. We can avoid the StringBuilder.
case List(Literal(Constant("")), arg) =>
genLoad(arg, ObjectReference)
genCallMethod(String_valueOf, InvokeStyle.Static)
case concatenations =>
bc.genStartConcat
for (elem <- concatenations) {
val loadedElem = elem match {
case Apply(boxOp, value :: Nil) if isBox(boxOp.symbol) =>
// Eliminate boxing of primitive values. Boxing is introduced by erasure because
// there's only a single synthetic `+` method "added" to the string class.
value
case _ => elem
}
val elemType = tpeTK(loadedElem)
genLoad(loadedElem, elemType)
bc.genConcat(elemType)
}
bc.genEndConcat
}
StringRef
}
/**
* Generate a method invocation. If `specificReceiver != null`, it is used as receiver in the
* invocation instruction, otherwise `method.owner`. A specific receiver class is needed to
   * prevent an IllegalAccessError (aladdin bug 455).
*/
def genCallMethod(method: Symbol, style: InvokeStyle, pos: Position = NoPosition, specificReceiver: Symbol = null): BType = {
val methodOwner = method.owner
// the class used in the invocation's method descriptor in the classfile
val receiverClass = {
if (specificReceiver != null)
assert(style.isVirtual || specificReceiver == methodOwner, s"specificReceiver can only be specified for virtual calls. $method - $specificReceiver")
val useSpecificReceiver = specificReceiver != null && !specificReceiver.isBottomClass && !method.isScalaStatic
val receiver = if (useSpecificReceiver) specificReceiver else methodOwner
// workaround for a JVM bug: https://bugs.openjdk.java.net/browse/JDK-8154587
// when an interface method overrides a member of Object (note that all interfaces implicitly
// have superclass Object), the receiver needs to be the interface declaring the override (and
// not a sub-interface that inherits it). example:
// trait T { override def clone(): Object = "" }
// trait U extends T
// class C extends U
// class D { def f(u: U) = u.clone() }
// The invocation `u.clone()` needs `T` as a receiver:
// - using Object is illegal, as Object.clone is protected
      //  - using U results in a `NoSuchMethodError: U.clone`. This is the JVM bug.
// Note that a mixin forwarder is generated, so the correct method is executed in the end:
// class C { override def clone(): Object = super[T].clone() }
val isTraitMethodOverridingObjectMember = {
receiver != methodOwner && // fast path - the boolean is used to pick either of these two, if they are the same it does not matter
style.isVirtual &&
receiver.isEmittedInterface &&
Object_Type.decl(method.name).exists && { // fast path - compute overrideChain on the next line only if necessary
val syms = method.allOverriddenSymbols
!syms.isEmpty && syms.last.owner == ObjectClass
}
}
if (isTraitMethodOverridingObjectMember) methodOwner else receiver
}
      receiverClass.info // ensure the type is up to date; erasure may add lateINTERFACE to traits
val receiverName = internalName(receiverClass)
val jname = method.javaSimpleName.toString
val bmType = asmMethodType(method)
val mdescr = bmType.descriptor
val isInterface = receiverClass.isEmittedInterface
import InvokeStyle._
if (style == Super) {
// DOTTY: this differ from how super-calls in traits are handled in the scalac backend,
// this is intentional but could change in the future, see https://github.com/lampepfl/dotty/issues/5928
bc.invokespecial(receiverName, jname, mdescr, isInterface)
} else {
val opc = style match {
case Static => Opcodes.INVOKESTATIC
case Special => Opcodes.INVOKESPECIAL
case Virtual => if (isInterface) Opcodes.INVOKEINTERFACE else Opcodes.INVOKEVIRTUAL
}
bc.emitInvoke(opc, receiverName, jname, mdescr, isInterface)
}
bmType.returnType
} // end of genCallMethod()
/* Generate the scala ## method. */
def genScalaHash(tree: Tree): BType = {
genLoad(tree, ObjectReference)
genCallMethod(hashMethodSym, InvokeStyle.Static)
}
/*
* Returns a list of trees that each should be concatenated, from left to right.
* It turns a chained call like "a".+("b").+("c") into a list of arguments.
*/
def liftStringConcat(tree: Tree): List[Tree] = tree match {
case tree @ Apply(fun @ Select(larg, method), rarg) =>
if (isPrimitive(fun) &&
primitives.getPrimitive(tree, larg.tpe) == ScalaPrimitivesOps.CONCAT)
liftStringConcat(larg) ::: rarg
else
tree :: Nil
case _ =>
tree :: Nil
}
/* Emit code to compare the two top-most stack values using the 'op' operator. */
private def genCJUMP(success: asm.Label, failure: asm.Label, op: TestOp, tk: BType, targetIfNoJump: asm.Label, negated: Boolean = false): Unit = {
if (targetIfNoJump == success) genCJUMP(failure, success, op.negate(), tk, targetIfNoJump, negated = !negated)
else {
if (tk.isIntSizedType) { // BOOL, BYTE, CHAR, SHORT, or INT
bc.emitIF_ICMP(op, success)
} else if (tk.isRef) { // REFERENCE(_) | ARRAY(_)
bc.emitIF_ACMP(op, success)
} else {
import Primitives._
def useCmpG = if (negated) op == GT || op == GE else op == LT || op == LE
(tk: @unchecked) match {
case LONG => emit(asm.Opcodes.LCMP)
case FLOAT => emit(if (useCmpG) asm.Opcodes.FCMPG else asm.Opcodes.FCMPL)
case DOUBLE => emit(if (useCmpG) asm.Opcodes.DCMPG else asm.Opcodes.DCMPL)
}
bc.emitIF(op, success)
}
if (targetIfNoJump != failure) bc goTo failure
}
}
/* Emits code to compare (and consume) stack-top and zero using the 'op' operator */
private def genCZJUMP(success: asm.Label, failure: asm.Label, op: TestOp, tk: BType, targetIfNoJump: asm.Label, negated: Boolean = false): Unit = {
import Primitives._
if (targetIfNoJump == success) genCZJUMP(failure, success, op.negate(), tk, targetIfNoJump, negated = !negated)
else {
if (tk.isIntSizedType) { // BOOL, BYTE, CHAR, SHORT, or INT
bc.emitIF(op, success)
} else if (tk.isRef) { // REFERENCE(_) | ARRAY(_)
(op: @unchecked) match { // references are only compared with EQ and NE
case EQ => bc emitIFNULL success
case NE => bc emitIFNONNULL success
}
} else {
def useCmpG = if (negated) op == GT || op == GE else op == LT || op == LE
(tk: @unchecked) match {
case LONG =>
emit(asm.Opcodes.LCONST_0)
emit(asm.Opcodes.LCMP)
case FLOAT =>
emit(asm.Opcodes.FCONST_0)
emit(if (useCmpG) asm.Opcodes.FCMPG else asm.Opcodes.FCMPL)
case DOUBLE =>
emit(asm.Opcodes.DCONST_0)
emit(if (useCmpG) asm.Opcodes.DCMPG else asm.Opcodes.DCMPL)
}
bc.emitIF(op, success)
}
if (targetIfNoJump != failure) bc goTo failure
}
}
def testOpForPrimitive(primitiveCode: Int) = (primitiveCode: @switch) match {
case ScalaPrimitivesOps.ID => Primitives.EQ
case ScalaPrimitivesOps.NI => Primitives.NE
case ScalaPrimitivesOps.EQ => Primitives.EQ
case ScalaPrimitivesOps.NE => Primitives.NE
case ScalaPrimitivesOps.LT => Primitives.LT
case ScalaPrimitivesOps.LE => Primitives.LE
case ScalaPrimitivesOps.GT => Primitives.GT
case ScalaPrimitivesOps.GE => Primitives.GE
}
/*
* Generate code for conditional expressions.
* The jump targets success/failure of the test are `then-target` and `else-target` resp.
*/
private def genCond(tree: Tree, success: asm.Label, failure: asm.Label, targetIfNoJump: asm.Label): Unit = {
def genComparisonOp(l: Tree, r: Tree, code: Int): Unit = {
val op = testOpForPrimitive(code)
val nonNullSide = if (ScalaPrimitivesOps.isReferenceEqualityOp(code)) ifOneIsNull(l, r) else null
if (nonNullSide != null) {
// special-case reference (in)equality test for null (null eq x, x eq null)
genLoad(nonNullSide, ObjectReference)
genCZJUMP(success, failure, op, ObjectReference, targetIfNoJump)
} else {
val tk = tpeTK(l).maxType(tpeTK(r))
genLoad(l, tk)
genLoad(r, tk)
genCJUMP(success, failure, op, tk, targetIfNoJump)
}
}
def loadAndTestBoolean() = {
genLoad(tree, BOOL)
genCZJUMP(success, failure, Primitives.NE, BOOL, targetIfNoJump)
}
lineNumber(tree)
tree match {
case tree @ Apply(fun, args) if isPrimitive(fun) =>
import ScalaPrimitivesOps.{ ZNOT, ZAND, ZOR, EQ }
// lhs and rhs of test
lazy val Select(lhs, _) = fun
val rhs = if (args.isEmpty) EmptyTree else args.head // args.isEmpty only for ZNOT
def genZandOrZor(and: Boolean): Unit = {
// reaching "keepGoing" indicates the rhs should be evaluated too (ie not short-circuited).
val keepGoing = new asm.Label
if (and) genCond(lhs, keepGoing, failure, targetIfNoJump = keepGoing)
else genCond(lhs, success, keepGoing, targetIfNoJump = keepGoing)
markProgramPoint(keepGoing)
genCond(rhs, success, failure, targetIfNoJump)
}
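          /* Illustrative note (not part of the original source): for `a && b`,
           * genZandOrZor(and = true) first tests `a`; when `a` is false control jumps straight
           * to `failure` and `b` is never evaluated, otherwise execution falls through to
           * `keepGoing` and the test of `b` decides between `success` and `failure`.
           */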
primitives.getPrimitive(tree, lhs.tpe) match {
case ZNOT => genCond(lhs, failure, success, targetIfNoJump)
case ZAND => genZandOrZor(and = true)
case ZOR => genZandOrZor(and = false)
case code =>
if (ScalaPrimitivesOps.isUniversalEqualityOp(code) && tpeTK(lhs).isClass) {
// rewrite `==` to null tests and `equals`. not needed for arrays (`equals` is reference equality).
if (code == EQ) genEqEqPrimitive(lhs, rhs, success, failure, targetIfNoJump)
else genEqEqPrimitive(lhs, rhs, failure, success, targetIfNoJump)
} else if (ScalaPrimitivesOps.isComparisonOp(code)) {
genComparisonOp(lhs, rhs, code)
} else
loadAndTestBoolean()
}
case _ => loadAndTestBoolean()
}
} // end of genCond()
/*
   * Generate the "==" code for object references. It is equivalent to
* if (l eq null) r eq null else l.equals(r);
*
* @param l left-hand-side of the '=='
* @param r right-hand-side of the '=='
*/
def genEqEqPrimitive(l: Tree, r: Tree, success: asm.Label, failure: asm.Label, targetIfNoJump: asm.Label): Unit = {
/* True if the equality comparison is between values that require the use of the rich equality
* comparator (scala.runtime.Comparator.equals). This is the case when either side of the
* comparison might have a run-time type subtype of java.lang.Number or java.lang.Character.
     * When it is statically known that both sides are equal and subtypes of Number or Character,
     * not using the rich equality is possible (their own equals method will do ok).
*/
val mustUseAnyComparator: Boolean = {
val areSameFinals = l.tpe.isFinalType && r.tpe.isFinalType && (l.tpe =:= r.tpe)
!areSameFinals && isMaybeBoxed(l.tpe.typeSymbol) && isMaybeBoxed(r.tpe.typeSymbol)
}
if (mustUseAnyComparator) {
val equalsMethod: Symbol = {
if (l.tpe <:< BoxedNumberClass.tpe) {
if (r.tpe <:< BoxedNumberClass.tpe) externalEqualsNumNum
else if (r.tpe <:< BoxedCharacterClass.tpe) externalEqualsNumChar
else externalEqualsNumObject
} else externalEquals
}
genLoad(l, ObjectReference)
genLoad(r, ObjectReference)
genCallMethod(equalsMethod, InvokeStyle.Static)
genCZJUMP(success, failure, Primitives.NE, BOOL, targetIfNoJump)
}
else {
if (isNull(l)) {
// null == expr -> expr eq null
genLoad(r, ObjectReference)
genCZJUMP(success, failure, Primitives.EQ, ObjectReference, targetIfNoJump)
} else if (isNull(r)) {
// expr == null -> expr eq null
genLoad(l, ObjectReference)
genCZJUMP(success, failure, Primitives.EQ, ObjectReference, targetIfNoJump)
} else if (isNonNullExpr(l)) {
// SI-7852 Avoid null check if L is statically non-null.
genLoad(l, ObjectReference)
genLoad(r, ObjectReference)
genCallMethod(Object_equals, InvokeStyle.Virtual)
genCZJUMP(success, failure, Primitives.NE, BOOL, targetIfNoJump)
} else {
// l == r -> if (l eq null) r eq null else l.equals(r)
val eqEqTempLocal = locals.makeLocal(ObjectReference, nme_EQEQ_LOCAL_VAR.mangledString, Object_Type, r.pos)
val lNull = new asm.Label
val lNonNull = new asm.Label
genLoad(l, ObjectReference)
genLoad(r, ObjectReference)
locals.store(eqEqTempLocal)
bc dup ObjectReference
genCZJUMP(lNull, lNonNull, Primitives.EQ, ObjectReference, targetIfNoJump = lNull)
markProgramPoint(lNull)
bc drop ObjectReference
locals.load(eqEqTempLocal)
genCZJUMP(success, failure, Primitives.EQ, ObjectReference, targetIfNoJump = lNonNull)
markProgramPoint(lNonNull)
locals.load(eqEqTempLocal)
genCallMethod(Object_equals, InvokeStyle.Virtual)
genCZJUMP(success, failure, Primitives.NE, BOOL, targetIfNoJump)
}
}
}
def genSynchronized(tree: Apply, expectedType: BType): BType
def genLoadTry(tree: Try): BType
def genInvokeDynamicLambda(ctor: Symbol, lambdaTarget: Symbol, environmentSize: Int, functionalInterface: Symbol): BType = {
import java.lang.invoke.LambdaMetafactory.FLAG_SERIALIZABLE
debuglog(s"Using invokedynamic rather than `new ${ctor.owner}`")
val generatedType = classBTypeFromSymbol(functionalInterface)
// Lambdas should be serializable if they implement a SAM that extends Serializable or if they
// implement a scala.Function* class.
val isSerializable = functionalInterface.isSerializable || functionalInterface.isFunctionClass
val isInterface = lambdaTarget.owner.isEmittedInterface
val invokeStyle =
if (lambdaTarget.isStaticMember) asm.Opcodes.H_INVOKESTATIC
else if (lambdaTarget.isPrivate || lambdaTarget.isClassConstructor) asm.Opcodes.H_INVOKESPECIAL
else if (isInterface) asm.Opcodes.H_INVOKEINTERFACE
else asm.Opcodes.H_INVOKEVIRTUAL
val targetHandle =
new asm.Handle(invokeStyle,
classBTypeFromSymbol(lambdaTarget.owner).internalName,
lambdaTarget.name.mangledString,
asmMethodType(lambdaTarget).descriptor,
/* itf = */ isInterface)
val (a,b) = lambdaTarget.info.paramTypes.splitAt(environmentSize)
      var (capturedParamsTypes, lambdaParamTypes) = if (int.doLabmdasFollowJVMMetafactoryOrder) (a, b) else (b, a)
if (invokeStyle != asm.Opcodes.H_INVOKESTATIC) capturedParamsTypes = lambdaTarget.owner.info :: capturedParamsTypes
// Requires https://github.com/scala/scala-java8-compat on the runtime classpath
val returnUnit = lambdaTarget.info.resultType.typeSymbol == UnitClass
val functionalInterfaceDesc: String = generatedType.descriptor
      val desc = capturedParamsTypes.map(tpe => toTypeKind(tpe)).mkString("(", "", ")") + functionalInterfaceDesc
// TODO specialization
val constrainedType = new MethodBType(lambdaParamTypes.map(p => toTypeKind(p)), toTypeKind(lambdaTarget.tpe.resultType)).toASMType
val abstractMethod = functionalInterface.samMethod()
val methodName = abstractMethod.name.mangledString
val applyN = {
val mt = asmMethodType(abstractMethod)
mt.toASMType
}
val bsmArgs0 = Seq(applyN, targetHandle, constrainedType)
val bsmArgs =
if (isSerializable)
bsmArgs0 :+ Int.box(FLAG_SERIALIZABLE)
else
bsmArgs0
val metafactory =
if (isSerializable)
lambdaMetaFactoryAltMetafactoryHandle // altMetafactory needed to be able to pass the SERIALIZABLE flag
else
lambdaMetaFactoryMetafactoryHandle
bc.jmethod.visitInvokeDynamicInsn(methodName, desc, metafactory, bsmArgs: _*)
generatedType
}
}
}
object BCodeBodyBuilder {
val lambdaMetaFactoryMetafactoryHandle = new Handle(
Opcodes.H_INVOKESTATIC,
"java/lang/invoke/LambdaMetafactory",
"metafactory",
"(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;Ljava/lang/invoke/MethodType;Ljava/lang/invoke/MethodHandle;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/CallSite;",
/* itf = */ false)
val lambdaMetaFactoryAltMetafactoryHandle = new Handle(
Opcodes.H_INVOKESTATIC,
"java/lang/invoke/LambdaMetafactory",
"altMetafactory",
"(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;[Ljava/lang/Object;)Ljava/lang/invoke/CallSite;",
/* itf = */ false)
val lambdaDeserializeBootstrapHandle = new Handle(
Opcodes.H_INVOKESTATIC,
"scala/runtime/LambdaDeserialize",
"bootstrap",
"(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;[Ljava/lang/invoke/MethodHandle;)Ljava/lang/invoke/CallSite;",
/* itf = */ false)
}
|
som-snytt/dotty
|
compiler/src/dotty/tools/backend/jvm/BCodeBodyBuilder.scala
|
Scala
|
apache-2.0
| 60,722 |
package scjson.codec
import minitest.*
import scjson.ast.*
object JsonDecoderTest extends SimpleTestSuite {
test("JsonDecoder should decode null") {
assertEquals(
JsonCodec decode "null",
Right(JsonValue.Null)
)
}
test("JsonDecoder should decode true") {
assertEquals(
JsonCodec decode "true",
Right(JsonValue.True)
)
}
test("JsonDecoder should decode false") {
assertEquals(
JsonCodec decode "false",
Right(JsonValue.False)
)
}
test("JsonDecoder should decode int 0") {
assertEquals(
JsonCodec decode "0",
Right(JsonValue.fromInt(0))
)
}
test("JsonDecoder should decode int -1") {
assertEquals(
JsonCodec decode "-1",
Right(JsonValue.fromInt(-1))
)
}
test("JsonDecoder should decode int 1") {
assertEquals(
JsonCodec decode "1",
Right(JsonValue.fromInt(1))
)
}
test("JsonDecoder should fail with a single dot") {
assert(
(JsonCodec decode ".").isLeft
)
}
test("JsonDecoder should fail without fraction after dot") {
assert(
(JsonCodec decode "0.").isLeft
)
}
test("JsonDecoder should fail without int before fraction") {
assert(
(JsonCodec decode ".0").isLeft
)
}
test("JsonDecoder should fail with a single dot before exp") {
assert(
(JsonCodec decode ".e1").isLeft
)
}
test("JsonDecoder should fail without int before exp") {
assert(
(JsonCodec decode "e1").isLeft
)
}
test("JsonDecoder should fail without digits after exp") {
assert(
(JsonCodec decode "1e").isLeft
)
}
test("JsonDecoder should decode float 0.0") {
assertEquals(
JsonCodec decode "0.0",
Right(JsonValue.fromInt(0))
)
}
test("JsonDecoder should decode float 1e1") {
assertEquals(
JsonCodec decode "1e1",
Right(JsonValue.fromInt(10))
)
}
test("JsonDecoder should decode float 2e+3") {
assertEquals(
JsonCodec decode "2e+3",
Right(JsonValue.fromInt(2000))
)
}
test("JsonDecoder should decode float 10e-1") {
assertEquals(
JsonCodec decode "10e-1",
Right(JsonValue.fromInt(1))
)
}
test("JsonDecoder should decode float 47.11") {
assertEquals(
JsonCodec decode "47.11",
Right(JsonValue.fromDouble(47.11))
)
}
test("JsonDecoder should fail with a leading zero in the body") {
// (JsonCodec decode "00") must beLike { case Fail(_) => ok }
assert(
(JsonCodec decode "00").isLeft
)
}
test("JsonDecoder should allow a leading zero in the exponent") {
assertEquals(
JsonCodec decode "0E00",
Right(JsonValue.fromInt(0))
)
}
test("JsonDecoder should decode simple strings") {
assertEquals(
JsonCodec decode "\\"hallo, welt!\\"",
Right(JsonValue.fromString("hallo, welt!"))
)
}
test("JsonDecoder should decode string escapes") {
assertEquals(
JsonCodec decode "\\" \\\\\\\\ \\\\/ \\\\t \\\\r \\\\n \\\\f \\\\b \\"",
Right(JsonValue.fromString(" \\\\ / \\t \\r \\n \\f \\b "))
)
}
test("JsonDecoder should decode small hex escapes") {
assertEquals(
JsonCodec decode "\\" \\\\u0123 \\"",
Right(JsonValue.fromString(" \\u0123 "))
)
}
test("JsonDecoder should decode big hex escapes") {
assertEquals(
JsonCodec decode "\\" \\\\uf3e2 \\"",
Right(JsonValue.fromString(" \\uf3e2 "))
)
}
test("JsonDecoder should decode upper case hex escapes") {
assertEquals(
JsonCodec decode "\\" \\\\uBEEF \\"",
Right(JsonValue.fromString(" \\uBEEF "))
)
}
test("JsonDecoder should decode hex escapes outside the basic plane") {
assertEquals(
JsonCodec decode "\\"\\\\uD834\\\\uDD1E\\"",
Right(JsonValue.fromString("\\uD834\\uDD1E"))
)
}
	test("JsonDecoder should decode hex escapes outside the basic plane (code point form)") {
val cs = (new java.lang.StringBuilder appendCodePoint 0x1D11E).toString
assertEquals(
JsonCodec decode "\\"\\\\uD834\\\\uDD1E\\"",
Right(JsonValue.fromString(cs))
)
}
test("JsonDecoder should decode arrays with 0 elements") {
assertEquals(
JsonCodec decode "[]",
Right(JsonValue.fromItems(Seq()))
)
}
test("JsonDecoder should decode arrays with 1 elements") {
assertEquals(
JsonCodec decode "[1]",
Right(JsonValue.fromItems(Seq(JsonValue.fromInt(1))))
)
}
test("JsonDecoder should decode arrays with 2 elements") {
assertEquals(
JsonCodec decode "[1,2]",
Right(JsonValue.fromItems(Seq(JsonValue.fromInt(1),JsonValue.fromInt(2))))
)
}
test("JsonDecoder should allow legal whitespace in arrays") {
assertEquals(
JsonCodec decode "[ ]",
Right(JsonValue.fromItems(Seq()))
)
}
test("JsonDecoder should disallow illegal whitespace in arrays") {
assert(
(JsonCodec decode "[\\u00a0]").isLeft
)
}
test("JsonDecoder should decode objects with 0 elements") {
assertEquals(
JsonCodec decode "{}",
Right(JsonValue.emptyObject)
)
}
test("JsonDecoder should decode objects with 1 elements") {
assertEquals(
JsonCodec decode "{\\"a\\":1}",
Right(JsonValue.fromFields(Seq("a"->JsonValue.fromInt(1))))
)
}
test("JsonDecoder should decode objects with 2 elements") {
assertEquals(
JsonCodec decode "{\\"a\\":1,\\"b\\":2}",
Right(JsonValue.fromFields(Seq("a"->JsonValue.fromInt(1),"b"->JsonValue.fromInt(2))))
)
}
}
|
ritschwumm/scjson
|
modules/codec/src/test/scala/JsonDecoderTest.scala
|
Scala
|
bsd-2-clause
| 5,079 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.ir
import java.security.MessageDigest
/** Wrapper around java.security.MessageDigest.getInstance("SHA-1") */
object SHA1 {
final class DigestBuilder {
private val digest = MessageDigest.getInstance("SHA-1")
def update(b: Byte): Unit =
digest.update(b)
def update(b: Array[Byte]): Unit =
digest.update(b)
def update(b: Array[Byte], off: Int, len: Int): Unit =
digest.update(b, off, len)
def updateUTF8String(str: UTF8String): Unit =
update(str.bytes)
def finalizeDigest(): Array[Byte] =
digest.digest()
}
}
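/* Illustrative usage sketch (not part of the original file): hash some bytes and render the
 * digest as hex; `data` is a made-up value of type Array[Byte].
 *
 *   val builder = new SHA1.DigestBuilder
 *   builder.update(data)
 *   val hex = builder.finalizeDigest().map(b => f"$b%02x").mkString
 */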
|
scala-js/scala-js
|
ir/jvm/src/main/scala/org/scalajs/ir/SHA1.scala
|
Scala
|
apache-2.0
| 865 |
package org.sisioh.aws4s.cfn.model
import com.amazonaws.services.cloudformation.model.DescribeStackResourcesRequest
import org.sisioh.aws4s.PimpedType
object DescribeStackResourcesRequestFactory {
def create(): DescribeStackResourcesRequest = new DescribeStackResourcesRequest()
}
class RichDescribeStackResourcesRequest(val underlying: DescribeStackResourcesRequest)
extends AnyVal with PimpedType[DescribeStackResourcesRequest] {
def stackNameOpt: Option[String] = Option(underlying.getStackName)
def stackNameOpt_=(value: Option[String]): Unit =
underlying.setStackName(value.orNull)
def withStackNameOpt(value: Option[String]): DescribeStackResourcesRequest =
underlying.withStackName(value.orNull)
// ---
def logicalResourceIdOpt: Option[String] = Option(underlying.getLogicalResourceId)
def logicalResourceIdOpt_=(value: Option[String]): Unit =
underlying.setLogicalResourceId(value.orNull)
def withLogicalResourceIdOpt(value: Option[String]): DescribeStackResourcesRequest =
underlying.withLogicalResourceId(value.orNull)
// ---
def physicalResourceIdOpt: Option[String] = Option(underlying.getPhysicalResourceId)
def physicalResourceIdOpt_=(value: Option[String]): Unit =
underlying.setPhysicalResourceId(value.orNull)
def withPhysicalResourceIdOpt(value: Option[String]): DescribeStackResourcesRequest =
underlying.withPhysicalResourceId(value.orNull)
}
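/* Illustrative usage sketch (not part of the original file): wrap a request and set fields
 * through the Option-based accessors; "my-stack" is a made-up stack name.
 *
 *   val request = DescribeStackResourcesRequestFactory.create()
 *   val rich    = new RichDescribeStackResourcesRequest(request)
 *   rich.stackNameOpt = Some("my-stack")
 *   val logicalId: Option[String] = rich.logicalResourceIdOpt
 */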
|
everpeace/aws4s
|
aws4s-cfn/src/main/scala/org/sisioh/aws4s/cfn/model/RichDescribeStackResourcesRequest.scala
|
Scala
|
mit
| 1,432 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import scala.collection.JavaConverters._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.{ExpressionEncoder, RowEncoder}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.execution.LeafExecNode
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.sql.sources.v2.reader._
import org.apache.spark.sql.types.StructType
/**
* Physical plan node for scanning data from a data source.
*/
case class DataSourceV2ScanExec(
fullOutput: Seq[AttributeReference],
@transient reader: DataSourceV2Reader) extends LeafExecNode with DataSourceReaderHolder {
override def canEqual(other: Any): Boolean = other.isInstanceOf[DataSourceV2ScanExec]
override def references: AttributeSet = AttributeSet.empty
override lazy val metrics = Map(
"numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"))
override protected def doExecute(): RDD[InternalRow] = {
val readTasks: java.util.List[ReadTask[UnsafeRow]] = reader match {
case r: SupportsScanUnsafeRow => r.createUnsafeRowReadTasks()
case _ =>
reader.createReadTasks().asScala.map {
new RowToUnsafeRowReadTask(_, reader.readSchema()): ReadTask[UnsafeRow]
}.asJava
}
val inputRDD = new DataSourceRDD(sparkContext, readTasks)
.asInstanceOf[RDD[InternalRow]]
val numOutputRows = longMetric("numOutputRows")
inputRDD.map { r =>
numOutputRows += 1
r
}
}
}
class RowToUnsafeRowReadTask(rowReadTask: ReadTask[Row], schema: StructType)
extends ReadTask[UnsafeRow] {
override def preferredLocations: Array[String] = rowReadTask.preferredLocations
override def createReader: DataReader[UnsafeRow] = {
new RowToUnsafeDataReader(rowReadTask.createReader, RowEncoder.apply(schema).resolveAndBind())
}
}
class RowToUnsafeDataReader(rowReader: DataReader[Row], encoder: ExpressionEncoder[Row])
extends DataReader[UnsafeRow] {
override def next: Boolean = rowReader.next
override def get: UnsafeRow = encoder.toRow(rowReader.get).asInstanceOf[UnsafeRow]
override def close(): Unit = rowReader.close()
}
|
aray/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2ScanExec.scala
|
Scala
|
apache-2.0
| 3,105 |
package net.kogics.jiva.evolution
import net.kogics.jiva.Predef._
import junit.framework._
import net.kogics.jiva.population._
class TestApgaReplacer extends TestCase {
def testReplacement = {
val currPop = Population(Chromosome("10110"), Chromosome("10001"), Chromosome("00100"),
Chromosome("11000"), Chromosome("11011"), Chromosome("11100"),
Chromosome("10001"), Chromosome("00111"))
val newPop = Population(Chromosome("11111"), Chromosome("10111"), Chromosome("01011"),
Chromosome("01100"), Chromosome("10000"), Chromosome("00000"))
currPop.foreach {chr => chr.props("lifetime") = 10; chr.fitness = Some(9.9)}
currPop(2).props("lifetime") = 1
currPop(7).props("lifetime") = 1
val replacer = new ApgaReplacer[jbool]
val pop2 = replacer.replace(currPop, newPop)
var idx = 0
currPop.foreach {chr =>
if (idx != 2 && idx != 7) assert(pop2.elements.contains(chr))
idx += 1
}
newPop.foreach {chr => assert(pop2.elements.contains(chr))}
}
}
|
milliondreams/jiva-ng
|
src/test/scala/net/kogics/jiva/evolution/TestApgaReplacer.scala
|
Scala
|
gpl-3.0
| 1,078 |
package com.ibm.spark.kernel.api
import com.ibm.spark.kernel.protocol.v5
import com.ibm.spark.kernel.protocol.v5.KernelMessage
import com.ibm.spark.kernel.protocol.v5.kernel.ActorLoader
/**
* Represents the methods available to stream data from the kernel to the
* client.
*/
class StreamMethods(actorLoader: ActorLoader, parentMessage: KernelMessage)
extends StreamMethodsLike
{
private[api] val kmBuilder = v5.KMBuilder()
.withParent(parentMessage)
.withIds(Seq(v5.content.StreamContent.toTypeString))
.withHeader(v5.content.StreamContent.toTypeString)
/**
* Sends all text provided as one stream message to the client.
* @param text The text to wrap in a stream message
*/
override def sendAll(text: String): Unit = {
val streamContent = v5.content.StreamContent(
"stdout", text
)
val kernelMessage = kmBuilder.withContentString(streamContent).build
actorLoader.load(v5.SystemActorType.KernelMessageRelay) ! kernelMessage
}
}
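/* Illustrative usage sketch (not part of the original file), assuming an ActorLoader and the
 * parent KernelMessage are already available from the kernel runtime:
 *
 *   val stream = new StreamMethods(actorLoader, parentMessage)
 *   stream.sendAll("Hello from the kernel\n")
 */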
|
codeaudit/spark-kernel
|
kernel/src/main/scala/com/ibm/spark/kernel/api/StreamMethods.scala
|
Scala
|
apache-2.0
| 990 |
/*
* Copyright 2010 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.grabbyhands
import java.util.concurrent.atomic.AtomicLong
import scala.collection.Map
import scala.collection.mutable.HashMap
class QueueCounters() {
val bytesRecv = new AtomicLong()
val bytesSent = new AtomicLong()
val messagesRecv = new AtomicLong()
val messagesSent = new AtomicLong()
val kestrelGetTimeouts = new AtomicLong()
val protocolError = new AtomicLong()
val sendCancelled = new AtomicLong()
val recvCancelled = new AtomicLong()
def toMap(): Map[String, Long] = {
val rv = new HashMap[String, Long]()
rv += ("bytesRecv" -> bytesRecv.get)
rv += ("bytesSent" -> bytesSent.get)
rv += ("messagesRecv" -> messagesRecv.get)
rv += ("messagesSent" -> messagesSent.get)
rv += ("kestrelGetTimeouts" -> kestrelGetTimeouts.get)
rv += ("protocolError" -> protocolError.get)
    rv += ("sendCancelled" -> sendCancelled.get)
    rv += ("recvCancelled" -> recvCancelled.get)
    scala.collection.immutable.Map() ++ rv
}
}
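/* Illustrative usage sketch (not part of the original file): bump a few counters and take an
 * immutable snapshot for reporting.
 *
 *   val counters = new QueueCounters()
 *   counters.messagesSent.incrementAndGet()
 *   counters.bytesSent.addAndGet(128L)
 *   val snapshot = counters.toMap()
 */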
|
twitter/grabby-hands
|
src/main/scala/com/twitter/grabbyhands/QueueCounters.scala
|
Scala
|
apache-2.0
| 1,539 |
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.zoo.pipeline.api.keras.layers
import com.intel.analytics.bigdl.nn.{Reverse, Select => TSelect, Sequential => TSequential}
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.nn.keras.{Recurrent => BKerasRecurrent}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.Shape
import com.intel.analytics.zoo.pipeline.api.keras.layers.internal.InternalRecurrent
import scala.reflect.ClassTag
abstract class Recurrent[T: ClassTag](
override val outputDim: Int,
override val returnSequences: Boolean = false,
override val goBackwards: Boolean = false,
override val inputShape: Shape = null)(implicit ev: TensorNumeric[T])
extends BKerasRecurrent[T](outputDim, returnSequences, goBackwards, inputShape) {
val rec = new InternalRecurrent[T]()
override def doBuild(inputShape: Shape): AbstractModule[Tensor[T], Tensor[T], T] = {
val input = inputShape.toSingle().toArray
val model = TSequential[T]()
if (goBackwards) model.add(Reverse(2))
rec.add(buildCell(input))
model.add(rec)
if (!returnSequences) model.add(TSelect(2, -1))
model.asInstanceOf[AbstractModule[Tensor[T], Tensor[T], T]]
}
def getHiddenShape(): Array[Int] = {
require(this.isBuilt(), "Cannot getHiddenShape before call doBuild!")
rec.getHiddenShape()
}
def getHiddenState(): Activity = {
rec.getHiddenState()
}
def setHiddenState(hiddenState: Activity): Unit = {
rec.setHiddenState(hiddenState)
}
def getGradHiddenState(): Activity = {
rec.getGradHiddenState()
}
def setGradHiddenState(gradHiddenState: Activity): Unit = {
rec.setGradHiddenState(gradHiddenState)
}
}
|
intel-analytics/analytics-zoo
|
zoo/src/main/scala/com/intel/analytics/zoo/pipeline/api/keras/layers/Recurrent.scala
|
Scala
|
apache-2.0
| 2,425 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.security
import java.io.File
import java.security.PrivilegedExceptionAction
import java.util.concurrent.{ScheduledExecutorService, TimeUnit}
import java.util.concurrent.atomic.AtomicReference
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.security.{Credentials, UserGroupInformation}
import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.UpdateDelegationTokens
import org.apache.spark.ui.UIUtils
import org.apache.spark.util.ThreadUtils
/**
* Manager for delegation tokens in a Spark application.
*
* This manager has two modes of operation:
*
* 1. When configured with a principal and a keytab, it will make sure long-running apps can run
* without interruption while accessing secured services. It periodically logs in to the KDC with
* user-provided credentials, and contacts all the configured secure services to obtain delegation
* tokens to be distributed to the rest of the application.
*
* Because the Hadoop UGI API does not expose the TTL of the TGT, a configuration controls how often
* to check that a relogin is necessary. This is done reasonably often since the check is a no-op
* when the relogin is not yet needed. The check period can be overridden in the configuration.
*
* New delegation tokens are created once 75% of the renewal interval of the original tokens has
* elapsed. The new tokens are sent to the Spark driver endpoint once it's registered with the AM.
* The driver is tasked with distributing the tokens to other processes that might need them.
*
* 2. When operating without an explicit principal and keytab, token renewal will not be available.
* Starting the manager will distribute an initial set of delegation tokens to the provided Spark
* driver, but the app will not get new tokens when those expire.
*
* It can also be used just to create delegation tokens, by calling the `obtainDelegationTokens`
* method. This option does not require calling the `start` method, but leaves it up to the
* caller to distribute the tokens that were generated.
*/
private[spark] class HadoopDelegationTokenManager(
protected val sparkConf: SparkConf,
protected val hadoopConf: Configuration) extends Logging {
private val deprecatedProviderEnabledConfigs = List(
"spark.yarn.security.tokens.%s.enabled",
"spark.yarn.security.credentials.%s.enabled")
private val providerEnabledConfig = "spark.security.credentials.%s.enabled"
private val principal = sparkConf.get(PRINCIPAL).orNull
private val keytab = sparkConf.get(KEYTAB).orNull
require((principal == null) == (keytab == null),
"Both principal and keytab must be defined, or neither.")
require(keytab == null || new File(keytab).isFile(), s"Cannot find keytab at $keytab.")
private val delegationTokenProviders = loadProviders()
logDebug("Using the following builtin delegation token providers: " +
s"${delegationTokenProviders.keys.mkString(", ")}.")
private var renewalExecutor: ScheduledExecutorService = _
private val driverRef = new AtomicReference[RpcEndpointRef]()
/** Set the endpoint used to send tokens to the driver. */
def setDriverRef(ref: RpcEndpointRef): Unit = {
driverRef.set(ref)
}
/** @return Whether delegation token renewal is enabled. */
def renewalEnabled: Boolean = principal != null
/**
* Start the token renewer. Requires a principal and keytab. Upon start, the renewer will:
*
* - log in the configured principal, and set up a task to keep that user's ticket renewed
* - obtain delegation tokens from all available providers
* - send the tokens to the driver, if it's already registered
* - schedule a periodic task to update the tokens when needed.
*
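 * A rough sketch of driving this from caller code (illustrative; assumes a principal and keytab
 * are configured and that `driverEndpointRef` is obtained elsewhere):
 * {{{
 * manager.setDriverRef(driverEndpointRef)
 * val loginUser = manager.start()
 * }}}
 *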
* @return The newly logged in user.
*/
def start(): UserGroupInformation = {
require(renewalEnabled, "Token renewal must be enabled to start the renewer.")
renewalExecutor =
ThreadUtils.newDaemonSingleThreadScheduledExecutor("Credential Renewal Thread")
val originalCreds = UserGroupInformation.getCurrentUser().getCredentials()
val ugi = doLogin()
val tgtRenewalTask = new Runnable() {
override def run(): Unit = {
ugi.checkTGTAndReloginFromKeytab()
}
}
val tgtRenewalPeriod = sparkConf.get(KERBEROS_RELOGIN_PERIOD)
renewalExecutor.scheduleAtFixedRate(tgtRenewalTask, tgtRenewalPeriod, tgtRenewalPeriod,
TimeUnit.SECONDS)
val creds = obtainTokensAndScheduleRenewal(ugi)
ugi.addCredentials(creds)
val driver = driverRef.get()
if (driver != null) {
val tokens = SparkHadoopUtil.get.serialize(creds)
driver.send(UpdateDelegationTokens(tokens))
}
// Transfer the original user's tokens to the new user, since it may contain needed tokens
    // (such as those used to connect to YARN). Explicitly avoid overwriting tokens that already
// exist in the current user's credentials, since those were freshly obtained above
// (see SPARK-23361).
val existing = ugi.getCredentials()
existing.mergeAll(originalCreds)
ugi.addCredentials(existing)
ugi
}
def stop(): Unit = {
if (renewalExecutor != null) {
renewalExecutor.shutdown()
}
}
/**
* Fetch new delegation tokens for configured services, storing them in the given credentials.
* Tokens are fetched for the current logged in user.
*
* @param creds Credentials object where to store the delegation tokens.
* @return The time by which the tokens must be renewed.
*/
def obtainDelegationTokens(creds: Credentials): Long = {
delegationTokenProviders.values.flatMap { provider =>
if (provider.delegationTokensRequired(sparkConf, hadoopConf)) {
provider.obtainDelegationTokens(hadoopConf, sparkConf, creds)
} else {
logDebug(s"Service ${provider.serviceName} does not require a token." +
s" Check your configuration to see if security is disabled or not.")
None
}
}.foldLeft(Long.MaxValue)(math.min)
}
// Visible for testing.
def isProviderLoaded(serviceName: String): Boolean = {
delegationTokenProviders.contains(serviceName)
}
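  /**
   * Whether delegation tokens should be obtained for the given service. The current
   * `spark.security.credentials.{service}.enabled` key takes precedence; when it is not set, the
   * deprecated YARN-era keys are consulted instead (and a deprecation warning is logged if any of
   * them are present).
   */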
protected def isServiceEnabled(serviceName: String): Boolean = {
val key = providerEnabledConfig.format(serviceName)
deprecatedProviderEnabledConfigs.foreach { pattern =>
val deprecatedKey = pattern.format(serviceName)
if (sparkConf.contains(deprecatedKey)) {
logWarning(s"${deprecatedKey} is deprecated. Please use ${key} instead.")
}
}
val isEnabledDeprecated = deprecatedProviderEnabledConfigs.forall { pattern =>
sparkConf
.getOption(pattern.format(serviceName))
.map(_.toBoolean)
.getOrElse(true)
}
sparkConf
.getOption(key)
.map(_.toBoolean)
.getOrElse(isEnabledDeprecated)
}
/**
* List of file systems for which to obtain delegation tokens. The base implementation
* returns just the default file system in the given Hadoop configuration.
*/
protected def fileSystemsToAccess(): Set[FileSystem] = {
Set(FileSystem.get(hadoopConf))
}
private def scheduleRenewal(delay: Long): Unit = {
val _delay = math.max(0, delay)
logInfo(s"Scheduling login from keytab in ${UIUtils.formatDuration(delay)}.")
val renewalTask = new Runnable() {
override def run(): Unit = {
updateTokensTask()
}
}
renewalExecutor.schedule(renewalTask, _delay, TimeUnit.MILLISECONDS)
}
/**
* Periodic task to login to the KDC and create new delegation tokens. Re-schedules itself
* to fetch the next set of tokens when needed.
*/
private def updateTokensTask(): Unit = {
try {
val freshUGI = doLogin()
val creds = obtainTokensAndScheduleRenewal(freshUGI)
val tokens = SparkHadoopUtil.get.serialize(creds)
val driver = driverRef.get()
if (driver != null) {
logInfo("Updating delegation tokens.")
driver.send(UpdateDelegationTokens(tokens))
} else {
// This shouldn't really happen, since the driver should register way before tokens expire.
logWarning("Delegation tokens close to expiration but no driver has registered yet.")
SparkHadoopUtil.get.addDelegationTokens(tokens, sparkConf)
}
} catch {
case e: Exception =>
val delay = TimeUnit.SECONDS.toMillis(sparkConf.get(CREDENTIALS_RENEWAL_RETRY_WAIT))
logWarning(s"Failed to update tokens, will try again in ${UIUtils.formatDuration(delay)}!" +
" If this happens too often tasks will fail.", e)
scheduleRenewal(delay)
}
}
/**
* Obtain new delegation tokens from the available providers. Schedules a new task to fetch
* new tokens before the new set expires.
*
* @return Credentials containing the new tokens.
*/
private def obtainTokensAndScheduleRenewal(ugi: UserGroupInformation): Credentials = {
ugi.doAs(new PrivilegedExceptionAction[Credentials]() {
override def run(): Credentials = {
val creds = new Credentials()
val nextRenewal = obtainDelegationTokens(creds)
// Calculate the time when new credentials should be created, based on the configured
// ratio.
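        // For example, with the default ratio of 0.75 and tokens expiring 24 hours from now, the
        // next fetch is scheduled roughly 18 hours out (numbers are illustrative).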
val now = System.currentTimeMillis
val ratio = sparkConf.get(CREDENTIALS_RENEWAL_INTERVAL_RATIO)
val delay = (ratio * (nextRenewal - now)).toLong
scheduleRenewal(delay)
creds
}
})
}
private def doLogin(): UserGroupInformation = {
logInfo(s"Attempting to login to KDC using principal: $principal")
val ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab)
logInfo("Successfully logged into KDC.")
ugi
}
private def loadProviders(): Map[String, HadoopDelegationTokenProvider] = {
val providers = Seq(new HadoopFSDelegationTokenProvider(fileSystemsToAccess)) ++
safeCreateProvider(new HiveDelegationTokenProvider) ++
safeCreateProvider(new HBaseDelegationTokenProvider)
// Filter out providers for which spark.security.credentials.{service}.enabled is false.
providers
.filter { p => isServiceEnabled(p.serviceName) }
.map { p => (p.serviceName, p) }
.toMap
}
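  /**
   * Instantiates a provider, returning None when construction throws (for example when optional
   * dependencies such as the Hive or HBase classes are not on the classpath).
   */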
private def safeCreateProvider(
createFn: => HadoopDelegationTokenProvider): Option[HadoopDelegationTokenProvider] = {
try {
Some(createFn)
} catch {
case t: Throwable =>
logDebug(s"Failed to load built in provider.", t)
None
}
}
}
|
ahnqirage/spark
|
core/src/main/scala/org/apache/spark/deploy/security/HadoopDelegationTokenManager.scala
|
Scala
|
apache-2.0
| 11,578 |
package com.alanjz.microstrike
import java.awt.Color
import scala.util.Random
package object team {
trait TeamGroup {
val group : TeamGroup
}
trait GroupA extends TeamGroup {
override val group = this
}
trait GroupB extends TeamGroup {
override val group = this
}
sealed abstract class Team(color : Color) extends TeamGroup {
def toAwtColor = color
}
case object Red extends Team(Color.red) with GroupB
case object Blue extends Team(Color.blue) with GroupA
case object Green extends Team(Color.green) with GroupA
case object Yellow extends Team(Color.yellow) with GroupB
case object Pink extends Team(Color.pink) with GroupA
case object Orange extends Team(new Color(255, 114, 20)) with GroupB
case object White extends Team(Color.white) with GroupA
case object Cyan extends Team(Color.cyan) with GroupB
case object Magenta extends Team(Color.magenta) with GroupB
case object Brown extends Team(new Color(150, 75, 0)) with GroupA
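  /**
   * Companion utilities for [[Team]]. A small usage sketch (illustrative only):
   * {{{
   * val t = Team.randomTeam
   * val c: Color = t // via the implicit Team => Color conversion below
   * }}}
   */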
object Team {
implicit def toAwtColor(lhs : Team) : Color = lhs.toAwtColor
def randomTeam = Random.nextInt(10) match {
case 0 => Red
case 1 => Blue
case 2 => Green
case 3 => Yellow
case 4 => Pink
case 5 => Orange
case 6 => White
case 7 => Cyan
case 8 => Magenta
case 9 => Brown
}
}
}
|
spacenut/microstrike
|
src/com/alanjz/microstrike/team/package.scala
|
Scala
|
gpl-2.0
| 1,351 |
package spark.debugger
import scala.collection.immutable
import scala.collection.mutable
import spark.Logging
/**
* Verifies a stream of checksum events and detects mismatches, which occur when the same entity has
* multiple checksums.
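 *
 * A minimal sketch (illustrative; `events` stands for any collection of [[ChecksumEvent]]s):
 * {{{
 * val verifier = new ChecksumVerifier
 * events.foreach(verifier.verify)
 * val conflicting = verifier.mismatches
 * }}}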
*/
class ChecksumVerifier extends Logging {
/** Map from entities to the checksum events associated with that entity. */
val checksums = new mutable.HashMap[Any, immutable.HashSet[ChecksumEvent]]
  /**
   * List of checksum events whose checksum differed from an existing event associated with the
   * same entity.
   */
val mismatches = new mutable.ArrayBuffer[ChecksumEvent]
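  /**
   * Records the given checksum event. If the same entity already has a recorded event with a
   * different checksum, the new event is logged and added to [[mismatches]] before being stored.
   */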
def verify(c: ChecksumEvent) {
if (checksums.contains(c.key)) {
if (!checksums(c.key).contains(c)) {
if (checksums(c.key).exists(_.mismatch(c))) reportMismatch(c)
checksums(c.key) += c
}
} else {
checksums.put(c.key, immutable.HashSet(c))
}
}
private def reportMismatch(c: ChecksumEvent) {
logWarning(c.warningString)
mismatches += c
}
}
|
ankurdave/arthur
|
core/src/main/scala/spark/debugger/ChecksumVerifier.scala
|
Scala
|
bsd-3-clause
| 1,029 |