| code (string, lengths 5-1M) | repo_name (string, lengths 5-109) | path (string, lengths 6-208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5-1M) |
---|---|---|---|---|---|
package code
package service
import code.commons.TimeUtils
import code.model.{Task, User}
import code.service.TaskItemService.{IntervalQuery, getTaskItems}
import code.util.ListToReducedMap._
import com.github.nscala_time.time.Imports._
import net.liftweb.common._
import net.liftweb.mapper.By
import org.joda.time.{DateTime, Duration, Interval, LocalDate, _}
import scala.language.postfixOps
/**
* Responsible for creating report data.
*
* @author David Csakvari
*/
object ReportService {
/**
* Calculates the number of milliseconds to be subtracted from leave time for a day,
* based on user preferences and the given offtime.
*/
def calculateTimeRemovalFromLeaveTime(offtime: Long): Long =
User.currentUser filter (_.subtractBreaks.get) map (_ => offtime) getOrElse 0L
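  // Editorial sketch (not in the original source): assuming the current user has the
  // `subtractBreaks` preference enabled, a 45-minute pause is passed through unchanged
  // and later subtracted from the leave time; otherwise the result is 0 ms.
  //
  //   calculateTimeRemovalFromLeaveTime(45L * 60 * 1000)   // 2700000L, or 0L without the preference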
/**
* Processes the TaskItems in the interval defined by the given IntervalQuery,
* and returns data that can be used in time sheets.
* @return a sequence of (dayOfMonth: Int, arriveTime: String, leaveTime: String, hours: Double) tuples. Arrive and leave time strings are in hh:mm format; hours is the span between them, or 0.0 if either endpoint is missing.
*/
def getTimesheetData(i: IntervalQuery, user: Box[User]): List[(Int,String,String,Double)] = {
(for {
(d, ts) <- getTaskItems(i, user) groupBy startDate mapValues (_ sortBy startDate)
if trim(ts).nonEmpty
} yield {
val breaks = calculateTimeRemovalFromLeaveTime {
trim(ts) filter (_.task.isEmpty) map (_.duration.getMillis) sum
}
val first = trim(ts).headOption
val last = ts.lastOption
val arrive = if (first.isEmpty) {
Left("-")
} else {
Right(first.get.taskItem.start.get)
}
val leave = if (last.isEmpty) {
Left("-")
} else {
if (last.get.taskItem.task.get == 0) {
Right(last.get.taskItem.start.get - breaks)
} else {
Left("...")
}
}
def transform(e: Either[String, Long]) = e match {
case Right(time) => new LocalTime(time).toString(TimeUtils.TIME_FORMAT)
case Left(err) => err
}
val sum = (arrive, leave) match {
case (Right(arriveTime), Right(leaveTime)) => (leaveTime - arriveTime) / (1000.0d * 60.0d * 60.0d)
case _ => 0.0d
}
(d.getDayOfMonth, transform(arrive), transform(leave), sum)
}).toList.sortBy(_._1)
}
def startDate(t: TaskItemWithDuration): LocalDate = new LocalDate(t.taskItem.start.get)
type TaskSheet = Map[ReadablePartial, Map[TaskSheetItem,Duration]]
def taskSheetData(i: IntervalQuery, u: Box[User], taskFilter: String = ""): TaskSheet = {
val ds = dates(i.interval, i.scale).map(d => d -> (Nil: List[TaskItemWithDuration])).toMap
(ds ++ taskItemsExceptPause(i, u, taskFilter).groupBy(t => i.scale(new LocalDate(t.taskItem.start.get))))
.mapValues(_.map(taskSheetItemWithDuration(_)).leftReducedMap(Duration.ZERO)(_ + _))
}
def getCollaborators(user: User) = {
val interval = IntervalQuery(new Interval(IntervalQuery.thisMonth().interval.start.minusMonths(3), IntervalQuery.thisMonth().interval.end), d => new YearMonth(d))
val collaborators = for (otherUser <- User.findAll if user != otherUser) yield (otherUser, collaboration(interval, user, otherUser))
collaborators.sortWith((item1, item2) => {
item1._2.toList.map(item => item._2.plus(item._3).getMillis).sum > item2._2.toList.map(item => item._2.plus(item._3).getMillis).sum
})
}
def collaboration(i: IntervalQuery, u1: User, u2: User) = {
val tasksheet1 = taskSheetData(i, Full(u1))
val tasksheet2 = taskSheetData(i, Full(u2))
val commonTasks = for (
(interval1, items1) <- tasksheet1;
(interval2, items2) <- tasksheet2;
i1 <- items1;
i2 <- items2
if interval1 == interval2 && i1._1.name == i2._1.name
) yield (i1._1, i1._2, i2._2)
commonTasks
.groupBy(_._1)
.mapValues(i => i.reduce((acc, i2) => (i2._1, i2._2.plus(acc._2), i2._3.plus(acc._3))))
.values
.toList
.sortWith((item1, item2) => (item1._2.plus(item1._3)).compareTo((item2._2.plus(item2._3))) > 0)
}
def dates(i: Interval, f: LocalDate => ReadablePartial): List[ReadablePartial] = days(i).map(f).distinct
def days(i: Interval): List[LocalDate] =
{
if (i.contains(DateTime.now())) 0 to i.withEnd(DateTime.now()).toPeriod(PeriodType.days).getDays
else 0 until i.toPeriod(PeriodType.days).getDays
} map (i.start.toLocalDate.plusDays(_)) toList
def taskItemsExceptPause(i: IntervalQuery, u: Box[User], taskFilter: String): List[TaskItemWithDuration] =
getTaskItems(i, u, true) filter (t => (if (taskFilter == "") t.taskName != "" else t.taskName != "" && t.fullName.toLowerCase().contains(taskFilter.toLowerCase())))
def taskSheetItemWithDuration(t: TaskItemWithDuration): (TaskSheetItem, Duration) =
(TaskSheetItem(t.task map (_.id.get) getOrElse 0L, t.fullName), new Duration(t.duration))
/**
* Removes the Pause tasks from the beginning and the end of the sequence.
*/
def trim(in: List[TaskItemWithDuration]): List[TaskItemWithDuration] = {
in.dropWhile(_.taskItem.task.get == 0).reverse.dropWhile(_.taskItem.task.get == 0).reverse
}
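  // Illustrative behaviour of `trim` (task id 0 denotes a Pause item):
  //   List(Pause, TaskA, Pause, TaskB, Pause)  ->  List(TaskA, Pause, TaskB)
  // Only leading and trailing pauses are dropped; pauses between tasks are kept.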
}
| dodie/time-admin | src/main/scala/code/service/ReportService.scala | Scala | apache-2.0 | 5,230 |
trait X[R <: Z, Z >: X[R, R] <: X[R, R]] // error // error
class Z extends X[Z, Z]
| som-snytt/dotty | tests/neg/i4369b.scala | Scala | apache-2.0 | 83 |
/**
* Copyright (C) 2010 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.common
import java.io.{File, FileInputStream}
import java.security.SignatureException
import org.orbeon.dom.Document
import org.orbeon.dom.io.XMLWriter
import org.orbeon.errorified.Exceptions
import org.orbeon.io.IOUtils._
import org.orbeon.oxf.pipeline.InitUtils.withPipelineContext
import org.orbeon.oxf.processor.ProcessorImpl.{INPUT_DATA, OUTPUT_DATA}
import org.orbeon.oxf.processor.generator.DOMGenerator
import org.orbeon.oxf.processor.{DOMSerializer, SignatureVerifierProcessor}
import org.orbeon.oxf.resources.{ResourceManagerWrapper, ResourceNotFoundException}
import org.orbeon.oxf.util.PathUtils._
import org.orbeon.oxf.util.PipelineUtils._
import org.orbeon.oxf.util.StringUtils._
import org.orbeon.oxf.util.{DateUtils, DateUtilsUsingSaxon}
import org.orbeon.oxf.xml.ParserConfiguration
import org.orbeon.oxf.xml.dom.IOSupport
import scala.util.Try
import scala.util.control.NonFatal
class PEVersion extends Version {
import org.orbeon.oxf.common.PEVersion._
import org.orbeon.oxf.common.Version._
// Check license file during construction
// If the license doesn't pass, throw an exception so that processing is interrupted
locally {
def licenseError(message: String, throwable: Option[Throwable] = None) = {
val fullMessage = VersionString + ": " + message + ". " + LicenseMessage
logger.error(fullMessage)
throw throwable getOrElse new OXFException(fullMessage)
}
val licenseInfo =
try tryReadLicense flatMap tryGetSignedData flatMap LicenseInfo.tryApply get
catch {
case NonFatal(t) =>
Exceptions.getRootThrowable(t) match {
case _: ResourceNotFoundException =>
licenseError("License file not found")
case _: SignatureException =>
licenseError("Invalid license file signature")
case NonFatal(t) =>
licenseError("Error loading license file", Some(t))
}
}
licenseInfo.formattedSubscriptionEnd match {
case Some(end) =>
// There is a subscription end date so we check that the build date is prior to that
// NOTE: Don't check against the current date as we don't want to depend on that for production licenses
if (licenseInfo.isBuildAfterSubscriptionEnd)
licenseError(s"Subscription ended on: $end, Orbeon Forms build dates from: ${licenseInfo.formattedBuildDate.get}")
case None =>
// There is no subscription end date so we check against the version
if (licenseInfo.isBadVersion)
licenseError(s"License version doesn't match. License version is: ${licenseInfo.version.get}, Orbeon Forms version is: $VersionNumber")
}
// Check expiration against the current date (for non-production licenses)
if (licenseInfo.isExpired)
licenseError(s"License has expired on ${licenseInfo.formattedExpiration.get}")
logger.info(s"This installation of $VersionString is licensed to: ${licenseInfo.toString}")
}
def requirePEFeature(featureName: String) = ()
def isPEFeatureEnabled(featureRequested: Boolean, featureName: String) = featureRequested
}
private object PEVersion {
import org.orbeon.oxf.common.Version._
val LicensePath = "/config/license.xml"
val LicenseURL = "oxf:" + LicensePath
val OrbeonPublicKeyURL = "oxf:/config/orbeon-public.xml"
val LicenseMessage = "Please make sure a proper license file is placed under WEB-INF/resources" + LicensePath + '.'
def isVersionExpired(currentVersion: String, licenseVersion: String): Boolean =
Version.compare(currentVersion, licenseVersion) match {
case Some(comparison) => comparison > 0
case None => true
}
private val MatchTimestamp = """(.*[^\d]|)(\d{4})(\d{2})(\d{2})\d{4}([^\d].*|)""".r
def dateFromVersionNumber(currentVersion: String): Option[Long] = (
Some(currentVersion)
collect { case MatchTimestamp(_, year, month, day, _) => year + '-' + month + '-' + day }
map DateUtilsUsingSaxon.parseISODateOrDateTime
)
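  // Illustrative example (hypothetical version string): the pattern expects a 12-digit
  // yyyyMMddHHmm timestamp embedded in the version, so
  //   dateFromVersionNumber("2016.1.201601152359")
  // captures year "2016", month "01", day "15" and returns the epoch millis of the
  // ISO date "2016-01-15".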
case class LicenseInfo(
versionNumber : String,
licensor : String,
licensee : String,
organization : String,
email : String,
issued : String,
version : Option[String],
expiration : Option[Long],
subscriptionEnd : Option[Long]
) {
def isBadVersion = version exists (isVersionExpired(versionNumber, _))
def isExpired = expiration exists (System.currentTimeMillis() > _)
def isBuildAfterSubscriptionEnd = subscriptionEnd exists (end => dateFromVersionNumber(versionNumber) exists (_ > end))
def formattedExpiration = expiration map DateUtils.formatRfc1123DateTimeGmt
def formattedSubscriptionEnd = subscriptionEnd map DateUtils.formatRfc1123DateTimeGmt
def formattedBuildDate = dateFromVersionNumber(versionNumber) map DateUtils.formatRfc1123DateTimeGmt
override def toString = {
val versionString = version map (" for version " + _) getOrElse ""
val subscriptionEndString = formattedSubscriptionEnd map (" with subscription ending on " + _) getOrElse ""
val expiresString = formattedExpiration map (" and expires on " + _) getOrElse ""
licensee + " / " + organization + " / " + email + versionString + subscriptionEndString + expiresString
}
}
object LicenseInfo {
def apply(licenceDocument: Document): LicenseInfo = {
import org.orbeon.oxf.xml.XPathUtils.selectStringValueNormalize
def select(s: String) = selectStringValueNormalize(licenceDocument, "/license/" + s)
val licensor = select("licensor")
val licensee = select("licensee")
val organization = select("organization")
val email = select("email")
val issued = select("issued")
val versionOpt = select("version").trimAllToOpt
val expirationOpt = select("expiration").trimAllToOpt map DateUtilsUsingSaxon.parseISODateOrDateTime
val subscriptionEndOpt = select("subscription-end").trimAllToOpt map DateUtilsUsingSaxon.parseISODateOrDateTime
LicenseInfo(VersionNumber, licensor, licensee, organization, email, issued, versionOpt, expirationOpt, subscriptionEndOpt)
}
def tryApply(licenceDocument: Document): Try[LicenseInfo] = Try(apply(licenceDocument))
}
def tryReadLicense: Try[Document] = {
def fromResourceManager =
Try(ResourceManagerWrapper.instance.getContentAsOrbeonDom(LicensePath))
def fromHomeDirectory =
Try {
val path = System.getProperty("user.home").dropTrailingSlash + "/.orbeon/license.xml"
useAndClose(new FileInputStream(new File(path))) { is =>
IOSupport.readOrbeonDom(is, path, ParserConfiguration.Plain)
}
}
fromResourceManager orElse fromHomeDirectory
}
def tryGetSignedData(rawDocument: Document): Try[Document] =
Try {
val key = createURLGenerator(OrbeonPublicKeyURL)
// Remove blank spaces as that's the way it was signed
val inputLicenseDocument =
IOSupport.readOrbeonDom(rawDocument.getRootElement.serializeToString(XMLWriter.CompactFormat))
// Connect pipeline
val serializer = {
val licence = new DOMGenerator(inputLicenseDocument, "license", DOMGenerator.ZeroValidity, LicenseURL)
val verifierProcessor = new SignatureVerifierProcessor
connect(licence, OUTPUT_DATA, verifierProcessor, INPUT_DATA)
connect(key, OUTPUT_DATA, verifierProcessor, SignatureVerifierProcessor.INPUT_PUBLIC_KEY)
val result = new DOMSerializer
connect(verifierProcessor, OUTPUT_DATA, result, INPUT_DATA)
result
}
// Execute pipeline to obtain license document
withPipelineContext { pipelineContext =>
serializer.reset(pipelineContext)
serializer.runGetDocument(pipelineContext)
}
}
}
| orbeon/orbeon-forms | src/main/scala/org/orbeon/oxf/common/PEVersion.scala | Scala | lgpl-2.1 | 8,732 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.eval
import cats.effect.laws.discipline.{ConcurrentEffectTests, ConcurrentTests}
import cats.kernel.laws.discipline.MonoidTests
import cats.laws.discipline.{CoflatMapTests, CommutativeApplicativeTests, ParallelTests, SemigroupKTests}
object TypeClassLawsForTaskSuite
extends BaseTypeClassLawsForTaskSuite()(
Task.defaultOptions.disableAutoCancelableRunLoops
)
object TypeClassLawsForTaskAutoCancelableSuite
extends BaseTypeClassLawsForTaskSuite()(
Task.defaultOptions.enableAutoCancelableRunLoops
)
class BaseTypeClassLawsForTaskSuite(implicit opts: Task.Options) extends BaseLawsSuite {
checkAllAsync("CoflatMap[Task]") { implicit ec =>
CoflatMapTests[Task].coflatMap[Int, Int, Int]
}
checkAllAsync("Concurrent[Task]") { implicit ec =>
ConcurrentTests[Task].concurrent[Int, Int, Int]
}
checkAllAsync("ConcurrentEffect[Task]") { implicit ec =>
ConcurrentEffectTests[Task].concurrentEffect[Int, Int, Int]
}
checkAllAsync("CommutativeApplicative[Task.Par]") { implicit ec =>
CommutativeApplicativeTests[Task.Par].commutativeApplicative[Int, Int, Int]
}
checkAllAsync("Parallel[Task, Task.Par]") { implicit ec =>
ParallelTests[Task, Task.Par].parallel[Int, Int]
}
checkAllAsync("Monoid[Task[Int]]") { implicit ec =>
MonoidTests[Task[Int]].monoid
}
checkAllAsync("SemigroupK[Task[Int]]") { implicit ec =>
SemigroupKTests[Task].semigroupK[Int]
}
}
| monifu/monifu | monix-eval/shared/src/test/scala/monix/eval/TypeClassLawsForTaskSuite.scala | Scala | apache-2.0 | 2,115 |
object A:
import scala.quoted.Type.valueOfTuple
object B:
import scala.quoted.Type.*
object C:
import Tuple.canEqualTuple
object D:
import Tuple.given
object E:
import Selectable.WithoutPreciseParameterTypes
| dotty-staging/dotty | tests/run/forwardCompat-unusedImport/Imports_1_r3.0.scala | Scala | apache-2.0 | 222 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.api.datasource
import slamdata.Predef._
import quasar.pkg.tests._
import eu.timepit.refined.api.Refined
import eu.timepit.refined.auto._
import eu.timepit.refined.numeric.{Positive => RPositive}
import eu.timepit.refined.scalacheck.numeric._
import org.scalacheck.Gen
trait DatasourceTypeGenerator {
implicit val datasourceTypeArbitrary: Arbitrary[DatasourceType] =
Arbitrary(for {
name <- genName
ver <- chooseRefinedNum[Refined, Long, RPositive](1L, 100L)
} yield DatasourceType(name, ver))
private def genName: Gen[DatasourceType.Name] =
for {
cs <- Gen.listOf(Gen.frequency(
100 -> Gen.alphaNumChar,
3 -> Gen.const('-')))
c <- Gen.alphaNumChar
} yield Refined.unsafeApply[String, DatasourceType.NameP]((c :: cs).mkString)
}
object DatasourceTypeGenerator extends DatasourceTypeGenerator
| slamdata/slamengine | api/src/test/scala/quasar/api/datasource/DatasourceTypeGenerator.scala | Scala | apache-2.0 | 1,479 |
package org.smartnut
import org.apache.spark._
import org.apache.spark.SparkContext._
import org.apache.spark.scheduler.{SplitInfo, StatsReportListener}
import org.apache.spark.{AccumulatorParam, Logging, SparkConf, SparkContext}
object SparkQuery extends Logging{
var sparkContext: SparkContext = _
def init() {
if (sparkContext == null) {
log.info("Initializing SparkContext")
val sparkMaster = Option(System.getenv("SPARK_MASTER")).getOrElse("local[3]")
log.info(s"setting $sparkMaster as spark master")
log.info("SPARK_HOME " + System.getenv("SPARK_HOME"))
val jobName: String = "TestSparkonPlay::" + java.net.InetAddress.getLocalHost.getHostName
sparkContext = {
val sparkConf = new SparkConf()
.setMaster(sparkMaster)
.setAppName(jobName)
.setJars(List("file:"+System.getProperty("user.dir") + "/SparkQueryLibrary/target/scala-2.10/sparkquerylibrary_2.10-1.0-SNAPSHOT.jar"))
Option(System.getenv("SPARK_HOME")).foreach(sparkConf.setSparkHome)
new SparkContext(sparkConf)
}
sparkContext.addSparkListener(new StatsReportListener())
log.info("Initialized SparkContext")
}
}
def sumRange(start:Int, end:Int) :Long = {
init()
sparkContext.parallelize(start to end).map(_.toLong).reduce(_ + _)
}
}
| smartnut007/SparkPlayApp | SparkQueryLibrary/src/main/scala/com/smartnut/SparkQuery.scala | Scala | apache-2.0 | 1,348 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.physical.marklogic.xquery
import slamdata.Predef._
import quasar.physical.marklogic.xquery.syntax._
import monocle.macros.Lenses
import scalaz._
import scalaz.syntax.show._
import xml.name._
@Lenses
final case class NamespaceDecl(ns: Namespace) {
def render: String = s"declare namespace ${ns.prefix.shows} = ${ns.uri.xs.shows}"
}
object NamespaceDecl {
implicit val order: Order[NamespaceDecl] =
Order.orderBy(_.ns)
implicit val show: Show[NamespaceDecl] =
Show.shows(nd => s"NamespaceDecl(${nd.render})")
}
| jedesah/Quasar | marklogic/src/main/scala/quasar/physical/marklogic/xquery/NamespaceDecl.scala | Scala | apache-2.0 | 1,148 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.mimir
import quasar.blueeyes._, json._
import quasar.precog.TestSupport._
import quasar.precog.common._
import quasar.precog.common.accounts._
import quasar.precog.common.security._
import quasar.yggdrasil.bytecode._
import quasar.yggdrasil.execution.EvaluationContext
import quasar.yggdrasil.table._
import quasar.yggdrasil.vfs._
import quasar.yggdrasil.util._
import scalaz._, Scalaz._, Validation._
import java.io.File
import scala.collection.mutable
trait EvaluatorSpecification[M[+_]] extends Specification with EvaluatorTestSupport[M] {
def M = Need.need.asInstanceOf[scalaz.Monad[M] with scalaz.Comonad[M]]
}
trait EvaluatorTestSupport[M[+_]] extends StdLibEvaluatorStack[M]
with BaseBlockStoreTestModule[M]
with IdSourceScannerModule
with SpecificationHelp { outer =>
def Evaluator[N[+_]](N0: Monad[N])(implicit mn: M ~> N, nm: N ~> M) =
new Evaluator[N](N0)(mn,nm) {
val report = new LoggingQueryLogger[N, instructions.Line] with ExceptionQueryLogger[N, instructions.Line] with TimingQueryLogger[N, instructions.Line] {
val M = N0
}
def freshIdScanner = outer.freshIdScanner
}
private val groupId = new java.util.concurrent.atomic.AtomicInteger
def newGroupId = groupId.getAndIncrement
def testAccount = AccountDetails("00001", "[email protected]", dateTime.now, "testAPIKey", Path.Root, AccountPlan.Free)
val defaultEvaluationContext = EvaluationContext("testAPIKey", testAccount, Path.Root, Path.Root, dateTime.now)
val projections = Map.empty[Path, Projection]
def vfs = sys.error("VFS metadata not supported in test.")
trait TableCompanion extends BaseBlockStoreTestTableCompanion {
override def load(table: Table, apiKey: APIKey, jtpe: JType) = EitherT {
table.toJson map { events =>
val eventsV = events.toStream.traverse[Validation[ResourceError, ?], Stream[JValue]] {
case JString(pathStr) => Validation.success {
indexLock synchronized { // block the WHOLE WORLD
val path = Path(pathStr)
val index = initialIndices get path getOrElse {
initialIndices += (path -> currentIndex)
currentIndex
}
val prefix = "filesystem"
val target = path.path.replaceAll("/$", ".json").replaceAll("^/" + prefix, prefix)
val src = if (target startsWith prefix)
io.Source.fromFile(new File(target.substring(prefix.length)))
else
io.Source.fromInputStream(getClass.getResourceAsStream(target))
val parsed: Stream[JValue] = src.getLines map JParser.parseUnsafe toStream
currentIndex += parsed.length
parsed zip (Stream from index) map {
case (value, id) => JObject(JField("key", JArray(JNum(id) :: Nil)) :: JField("value", value) :: Nil)
}
}
}
case x =>
Validation.failure(ResourceError.corrupt("Attempted to load JSON as a table from something that wasn't a string: " + x))
}
eventsV.disjunction.map(ss => fromJson(ss.flatten))
}
}
}
object Table extends TableCompanion
private var initialIndices = mutable.Map[Path, Int]() // if we were doing this for real: j.u.c.HashMap
private var currentIndex = 0 // if we were doing this for real: j.u.c.a.AtomicInteger
private val indexLock = new AnyRef // if we were doing this for real: DIE IN A FIRE!!!
}
| drostron/quasar | mimir/src/test/scala/quasar/mimir/EvaluatorSpecs.scala | Scala | apache-2.0 | 4,152 |
package ml.combust.mleap.core.feature
import ml.combust.mleap.core.Model
import ml.combust.mleap.core.types.{BasicType, ListType, ScalarType, StructType}
import scala.util.matching.Regex
case class RegexTokenizerModel(regex: Regex, matchGaps: Boolean = true, tokenMinLength: Int = 1, lowercaseText: Boolean = true) extends Model {
def apply(raw: String): Seq[String] = {
val text = if (lowercaseText) raw.toLowerCase else raw
val tokens = if (matchGaps) regex.split(text).toSeq else regex.findAllIn(text).toSeq
tokens.filter(_.length >= tokenMinLength)
}
override def inputSchema: StructType = StructType("input" -> ScalarType.String.nonNullable).get
override def outputSchema: StructType = StructType("output" -> ListType(BasicType.String)).get
}
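// Usage sketch (added for illustration, not part of the original file):
//   val onGaps = RegexTokenizerModel("""\s+""".r)                        // split on whitespace gaps
//   onGaps("Hello MLeap World")                                          // Seq("hello", "mleap", "world")
//   RegexTokenizerModel("""[a-z]+""".r, matchGaps = false)("Foo Bar")    // match tokens instead of gaps -> Seq("foo", "bar")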
| combust/mleap | mleap-core/src/main/scala/ml/combust/mleap/core/feature/RegexTokenizerModel.scala | Scala | apache-2.0 | 774 |
package org.modelfun
import paths.{UnitPath, Path}
/**
*
*/
case class Extrusion(path: Parameters=>Path = UnitPath, outline: Parameters=>Path) extends Model
| zzorn/modelfun | src/main/scala/org/modelfun/models/Extrusion.scala | Scala | lgpl-3.0 | 163 |
package com.rasterfoundry.database
import com.rasterfoundry.common.Generators.Implicits._
import com.rasterfoundry.datamodel.PageRequest
import com.rasterfoundry.datamodel._
import cats.implicits._
import doobie.implicits._
import org.scalacheck.Prop.forAll
import org.scalatestplus.scalacheck.Checkers
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
class LayerScenesDaoSpec
extends AnyFunSuite
with Matchers
with Checkers
with DBTestConfig
with PropTestHelpers {
test("list scenes for a project layer") {
check {
forAll {
(
user: User.Create,
org: Organization.Create,
platform: Platform,
project: Project.Create,
scenes: List[Scene.Create],
dsCreate: Datasource.Create,
page: PageRequest,
csq: ProjectSceneQueryParameters
) =>
{
val scenesInsertWithUserProjectIO = for {
(dbUser, _, _, dbProject) <- insertUserOrgPlatProject(
user,
org,
platform,
project
)
datasource <- DatasourceDao.create(
dsCreate.toDatasource(dbUser),
dbUser
)
scenesInsert <- (scenes map {
fixupSceneCreate(dbUser, datasource, _)
}).traverse(
(scene: Scene.Create) => SceneDao.insert(scene, dbUser)
)
} yield (scenesInsert, dbUser, dbProject)
val scenesListIO = scenesInsertWithUserProjectIO flatMap {
case (
dbScenes: List[Scene.WithRelated],
_: User,
dbProject: Project
) =>
ProjectDao.addScenesToProject(
dbScenes map { _.id },
dbProject.id,
dbProject.defaultLayerId
) flatMap { _ =>
{
ProjectLayerScenesDao.listLayerScenes(
dbProject.defaultLayerId,
page,
csq
) map {
paginatedResponse: PaginatedResponse[
Scene.ProjectScene
] =>
(dbScenes, paginatedResponse.results)
}
}
}
}
val (insertedScenes, listedScenes) =
scenesListIO.transact(xa).unsafeRunSync
val insertedIds = insertedScenes.toSet map {
scene: Scene.WithRelated =>
scene.id
}
val listedIds = listedScenes.toSet map {
scene: Scene.ProjectScene =>
scene.id
}
// page request can ask for fewer scenes than the number we inserted
(insertedIds & listedIds) == listedIds
}
}
}
}
test("count scenes in layers for a project") {
check {
forAll {
(
user: User.Create,
org: Organization.Create,
platform: Platform,
project: Project.Create,
layersWithScenes: List[(ProjectLayer.Create, List[Scene.Create])],
dsCreate: Datasource.Create
) =>
{
val countsWithCountedIO = for {
(dbUser, _, _, dbProject) <- insertUserOrgPlatProject(
user,
org,
platform,
project
)
dbDatasource <- fixupDatasource(dsCreate, dbUser)
dbLayersWithSceneCounts <- layersWithScenes traverse {
case (projectLayerCreate, scenesList) =>
for {
dbProjectLayer <- ProjectLayerDao.insertProjectLayer(
projectLayerCreate
.copy(projectId = Some(dbProject.id))
.toProjectLayer
)
dbScenes <- scenesList traverse { scene =>
SceneDao.insert(
fixupSceneCreate(dbUser, dbDatasource, scene),
dbUser
)
}
_ <- ProjectDao.addScenesToProject(
dbScenes map { _.id },
dbProject.id,
dbProjectLayer.id
)
} yield { (dbProjectLayer.id, dbScenes.length) }
}
counted <- ProjectLayerScenesDao.countLayerScenes(dbProject.id)
} yield (counted, dbLayersWithSceneCounts)
val (counted, expectedCounts) =
countsWithCountedIO.transact(xa) map {
case (tups1, tups2) => (Map(tups1: _*), Map(tups2: _*))
} unsafeRunSync
val expectation =
if (counted.isEmpty) {
expectedCounts.values.sum == 0
} else {
expectedCounts.filter(kvPair => kvPair._2 != 0) == counted
}
assert(
expectation,
"Counts by layer id should equal the counts of scenes added to each layer"
)
true
}
}
}
}
}
| raster-foundry/raster-foundry | app-backend/db/src/test/scala/com/azavea/rf/database/ProjectLayerScenesDaoSpec.scala | Scala | apache-2.0 | 5,349 |
package actors
import models._
import daos.Store
import serializers.SerializerComponent
import akka.actor._
import akka.event.Logging
import akka.routing.SmallestMailboxRouter
trait CheckerComponent {
this:Store with BestPricerComponent with ComponentSystem=>
val checkerActorRef = system.actorOf(Props(new CheckerActor).withRouter(
SmallestMailboxRouter(nrOfInstances = 2)))
class CheckerActor extends Actor{
val log = Logging(context.system, this)
def receive={
case Check(searchRequest:SearchRequest, cheapestPrice:CheapestPrice)=>{
log.info("Receive :"+ searchRequest+ " with: "+cheapestPrice)
val monitors = store.getMonitorBySearchRequest(searchRequest)
monitors.foreach(monitor =>{
if(monitor.price > cheapestPrice.price){
store.storeMonitor(monitor.copy(price=cheapestPrice.price))
store.getUser(monitor.userId).map ( user => {
bestPricerComponent!CheckBestPrice(Notify(user, cheapestPrice, searchRequest, cheapestPrice.price))
})
}
})
}
}
}
}
| soulofpeace/FareHound | app/actors/CheckerComponent.scala | Scala | apache-2.0 | 1,100 |
package jerimum
import br.edu.ifrn.potigol.Potigolutil.{ Inteiro, Real, Texto }
import java.awt.Font
object Fonte {
}
case class Fonte(tamanho: Inteiro) {
private[this] val font = new Font("Dialog", Font.BOLD, tamanho);
def desenhe_centralizado(msg: Texto, x: Real, y: Real, z: Inteiro, cor: Cor = Cor.BRANCO) = {
Desenho.incluir(z, g => {
g.setColor(cor.color)
g.setFont(font)
val largura = g.getFontMetrics.stringWidth(msg)
val altura = g.getFontMetrics.getHeight
g.drawString(msg, x.toInt - largura / 2, y.toInt - altura / 2)
})
}
def desenhe(msg: Texto, x: Real, y: Real, z: Inteiro, cor: Cor = Cor.BRANCO) = {
Desenho.incluir(z, g => {
g.setColor(cor.color)
g.setFont(font)
g.drawString(msg, x.toInt, y.toInt)
})
}
}
| potigol/Jerimum | src/main/scala/jerimum/Fonte.scala | Scala | mit | 832 |
package neuroflow.nets.cpu
import breeze.linalg._
import breeze.stats._
import neuroflow.core.Network._
import neuroflow.core.WaypointLogic.NoOp
import neuroflow.core.{CanProduce, _}
import neuroflow.dsl._
import scala.annotation.tailrec
import scala.collection.Seq
import scala.collection.mutable.ArrayBuffer
import scala.util.Try
/**
*
* Convolutional Neural Network running on CPU,
* uses gradient descent to optimize the loss function.
*
* @author bogdanski
* @since 31.08.17
*
*/
object ConvNetwork {
implicit object double extends Constructor[Double, ConvNetworkDouble] {
def apply(ls: Seq[Layer[Double]], loss: LossFunction[Double], settings: Settings[Double])(implicit breeder: WeightBreeder[Double]): ConvNetworkDouble = {
ConvNetworkDouble(ls, loss, settings, breeder(ls))
}
}
implicit object weights_double extends neuroflow.core.WeightBreeder.Initializer[Double]
implicit object single extends Constructor[Float, ConvNetworkFloat] {
def apply(ls: Seq[Layer[Float]], loss: LossFunction[Float], settings: Settings[Float])(implicit breeder: WeightBreeder[Float]): ConvNetworkFloat = {
ConvNetworkFloat(ls, loss, settings, breeder(ls))
}
}
implicit object weights_float extends neuroflow.core.WeightBreeder.Initializer[Float]
}
//<editor-fold defaultstate="collapsed" desc="Double Precision Impl">
case class ConvNetworkDouble(layers: Seq[Layer[Double]], lossFunction: LossFunction[Double], settings: Settings[Double], weights: Weights[Double],
identifier: String = "neuroflow.nets.cpu.ConvNetwork", numericPrecision: String = "Double")
extends CNN[Double] with WaypointLogic[Double] {
type Vector = DenseVector[Double]
type Matrix = DenseMatrix[Double]
type Tensor = Tensor3D[Double]
type Vectors = Seq[DenseVector[Double]]
type Matrices = Seq[DenseMatrix[Double]]
type Tensors = Seq[Tensor3D[Double]]
private val _allLayers = layers.map {
case d: Dense[Double] => d
case c: Convolution[Double] => c
}.toArray
private val _lastLayerIdx = weights.size - 1
private val _convLayers =
_allLayers.zipWithIndex.map(_.swap).filter {
case (_, _: Convolution[_]) => true
case _ => false
}.toMap.mapValues {
case c: Convolution[Double] => c
}
private val _activators = _allLayers.map(_.activator)
private val _outputDim = _allLayers.last.neurons
private val _lastC = _convLayers.maxBy(_._1)._1
private val _lastL = _allLayers.indices.last
/**
* Computes output for `x`.
*/
def apply(x: Tensor): Vector = {
sink(x.matrix, _lastLayerIdx, batchSize = 1).toDenseVector
}
/**
* Computes output for given inputs `in`
* using efficient batch mode.
*/
def batchApply(xs: Tensors): Vectors = {
BatchBreeder.unsliceMatrixByRow {
sink(BatchBreeder.horzCatTensorBatch(xs), _lastLayerIdx, batchSize = xs.size)
}
}
/**
* `apply` under a focused layer.
*/
def focus[L <: Layer[Double]](l: L)(implicit cp: CanProduce[(Matrix, L), l.algebraicType]): Tensor => l.algebraicType = {
val lwi = layers.zipWithIndex
val idx = lwi.find(_._1 eq l).orElse {
val p = lwi.filter(_._1 == l)
if (p.size > 1) warn(s"Focus layer $l is ambiguous. Taking first. " +
"Alternatively, use a direct object reference to the desired layer.")
p.headOption
} match {
case Some((l, i)) => debug(s"Found focus layer $l at index $i."); i
case None => warn(s"Focus layer $l not found. Fallback to last layer."); _lastLayerIdx
}
(in: Tensor) => {
cp(sink(in.matrix, idx, batchSize = 1), l)
}
}
/**
* Trains this net with input `xs` against output `ys`.
*/
def train(xs: Tensors, ys: Vectors): Try[Run] = Try {
import settings._
val batchSize = settings.batchSize.getOrElse(xs.size)
require(xs.size == ys.size, s"Mismatch between sample sizes. (${xs.size} != ${ys.size})")
if (settings.verbose) {
if(xs.size % batchSize != 0) warn(s"Batches are not even. (${xs.size} % $batchSize = ${xs.size % batchSize} != 0)")
info(s"Training with ${xs.size} samples, batch size = $batchSize, batches = ${math.ceil(xs.size.toDouble / batchSize.toDouble).toInt}.")
info(s"Breeding batches ...")
}
val (xsys, batchSizes) = BatchBreeder.breedCNN(xs, ys, batchSize)
run(xsys, learningRate(1 -> 1.0), batchSizes, precision, batch = 0, batches = xsys.size, iteration = 1, iterations, startTime = System.currentTimeMillis())
}
private def sink(x: Matrix, target: Int, batchSize: Int): Matrix = {
val r1 = flow(x, target, batchSize)
val r2 = if (target == _lastLayerIdx) lossFunction.sink(r1) else r1
r2
}
private def flow(in: Matrix, target: Int, batchSize: Int): Matrix = {
val _fa = ArrayBuffer.empty[Matrix]
val _fr = ArrayBuffer.empty[Matrix]
@tailrec def conv(_in: Matrix, i: Int): Unit = {
val l = _convLayers(i)
val p = weights(i) * convolute(_in, l, batchSize)
val a = p.map(_activators(i))
_fa += { if (i == _lastC) reshape_batch(a, l.dimOut, batchSize) else a }
_fr += a
if (i < _lastC) conv(a, i + 1)
}
@tailrec def fully(_in: Matrix, i: Int): Unit = {
val l = _allLayers(i)
val p = _in * weights(i)
val a = p.map(_activators(i))
_fa += a
_fr += a
if (i < _lastL) fully(a, i + 1)
}
conv(in, 0)
fully(_fa(_lastC), _lastC + 1)
_fr(target)
}
/**
* The training loop.
*/
@tailrec private def run(xsys: Seq[(Matrix, Matrix)], stepSize: Double, batchSizes: Map[Int, Int], precision: Double,
batch: Int, batches: Int, iteration: Int, maxIterations: Int, startTime: Long): Run = {
val batchSize = batchSizes(batch)
val (x, y) = (xsys(batch)._1, xsys(batch)._2)
val loss =
if (settings.approximation.isDefined) adaptWeightsApprox(x, y, stepSize, batchSize)
else adaptWeights(x, y, stepSize, batchSize)
val lossMean = mean(loss)
if (settings.verbose) info(f"Iteration $iteration.${batch + 1}, Avg. Loss = $lossMean%.6g, Vector: $loss")
maybeGraph(lossMean)
waypoint(NoOp)(iteration)
if (lossMean > precision && iteration < maxIterations) {
run(xsys, settings.learningRate(iteration + 1 -> stepSize), batchSizes,
precision, (batch + 1) % batches, batches, iteration + 1, maxIterations, startTime)
} else {
info(f"Took $iteration of $maxIterations iterations.")
Run(startTime, System.currentTimeMillis(), iteration)
}
}
/**
* Computes gradient for weights with respect to given batch,
* adapts their value using gradient descent and returns the loss matrix.
*/
private def adaptWeights(x: Matrix, y: Matrix, stepSize: Double, batchSize: Int): Matrix = {
import settings.updateRule
val loss = DenseMatrix.zeros[Double](batchSize, _outputDim)
val fa = collection.mutable.Map.empty[Int, Matrix]
val fb = collection.mutable.Map.empty[Int, Matrix]
val fc = collection.mutable.Map.empty[Int, Matrix]
val dws = collection.mutable.Map.empty[Int, Matrix]
val ds = collection.mutable.Map.empty[Int, Matrix]
@tailrec def conv(_in: Matrix, i: Int): Unit = {
val l = _convLayers(i)
val c = convolute(_in, l, batchSize)
val p = weights(i) * c
val a = p.map(_activators(i))
val b = p.map(_activators(i).derivative)
fa += i -> { if (i == _lastC) reshape_batch(a, l.dimOut, batchSize) else a }
fb += i -> b
fc += i -> c
if (i < _lastC) conv(a, i + 1)
}
@tailrec def fully(_in: Matrix, i: Int): Unit = {
val l = _allLayers(i)
val p = _in * weights(i)
val a = p.map(_activators(i))
val b = p.map(_activators(i).derivative)
fa += i -> a
fb += i -> b
if (i < _lastL) fully(a, i + 1)
}
@tailrec def derive(i: Int): Unit = {
if (i == _lastLayerIdx) {
val (err, grad) = lossFunction(y, fa(i))
val d = grad *:* fb(i)
val dw = fa(i - 1).t * d
dws += i -> dw
ds += i -> d
loss += err
derive(i - 1)
} else if (i < _lastLayerIdx && i > _lastC) {
val d = (ds(i + 1) * weights(i + 1).t) *:* fb(i)
val dw = fa(i - 1).t * d
dws += i -> dw
ds += i -> d
derive(i - 1)
} else if (i == _lastC) {
val l = _convLayers(i)
val d1 = ds(i + 1) * weights(i + 1).t
val d2 = reshape_batch_backprop(d1, l.dimOut, batchSize)
val d = d2 *:* fb(i)
val dw = d * fc(i).t
dws += i -> dw
ds += i -> d
if (i > 0) derive(i - 1)
} else {
val l = _convLayers(i + 1)
val ww = reshape_batch(weights(i + 1), (l.field._1, l.field._2, l.filters), l.dimIn._3)
val dc = convolute_backprop(ds(i + 1), l, batchSize)
val d = ww * dc *:* fb(i)
val dw = d * fc(i).t
dws += i -> dw
ds += i -> d
if (i > 0) derive(i - 1)
}
}
conv(x, 0)
fully(fa(_lastC), _lastC + 1)
derive(_lastLayerIdx)
(0 to _lastLayerIdx).foreach(i => updateRule(weights(i), dws(i), stepSize, i))
val lossReduced = (loss.t * DenseMatrix.ones[Double](loss.rows, 1)).t
lossReduced
}
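  // Editorial note (not in the original source): `updateRule` is pluggable; a plain
  // gradient-descent rule would amount to the in-place Breeze update
  //   weights(i) :-= dws(i) * stepSize
  // i.e. w := w - stepSize * dL/dw for every weight layer i.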
/** For debugging, approximates the gradients using `settings.approximation`. */
private def adaptWeightsApprox(xs: Matrix, ys: Matrix, stepSize: Double, batchSize: Int): Matrix = {
require(settings.updateRule.isInstanceOf[Debuggable[Double]])
val _rule: Debuggable[Double] = settings.updateRule.asInstanceOf[Debuggable[Double]]
def lossFunc(): Matrix = {
val loss = lossFunction(ys, flow(xs, _lastLayerIdx, batchSize))._1
val reduced = (loss.t * DenseMatrix.ones[Double](loss.rows, 1)).t
reduced
}
val out = lossFunc()
def approximateGradient(weightLayer: Int, weight: (Int, Int)): Double = {
sum(settings.approximation.get.apply(weights, lossFunc, () => (), weightLayer, weight))
}
val updates = collection.mutable.HashMap.empty[(Int, (Int, Int)), Double]
val grads = collection.mutable.HashMap.empty[(Int, (Int, Int)), Double]
val debug = collection.mutable.HashMap.empty[Int, Matrix]
weights.zipWithIndex.foreach {
case (l, idx) =>
debug += idx -> l.copy
l.foreachPair { (k, v) =>
val grad = approximateGradient(idx, k)
updates += (idx, k) -> (v - (stepSize * grad))
grads += (idx, k) -> grad
}
}
updates.foreach {
case ((wl, k), v) =>
weights(wl).update(k, v)
}
grads.foreach {
case ((wl, k), v) =>
debug(wl).update(k, v)
}
_rule.lastGradients = debug
out
}
}
//</editor-fold>
//<editor-fold defaultstate="collapsed" desc="Single Precision Impl">
case class ConvNetworkFloat(layers: Seq[Layer[Float]], lossFunction: LossFunction[Float], settings: Settings[Float], weights: Weights[Float],
identifier: String = "neuroflow.nets.cpu.ConvNetwork", numericPrecision: String = "Single")
extends CNN[Float] with WaypointLogic[Float] {
type Vector = DenseVector[Float]
type Matrix = DenseMatrix[Float]
type Tensor = Tensor3D[Float]
type Vectors = Seq[DenseVector[Float]]
type Matrices = Seq[DenseMatrix[Float]]
type Tensors = Seq[Tensor3D[Float]]
private val _allLayers = layers.map {
case d: Dense[Float] => d
case c: Convolution[Float] => c
}.toArray
private val _lastLayerIdx = weights.size - 1
private val _convLayers =
_allLayers.zipWithIndex.map(_.swap).filter {
case (_, _: Convolution[_]) => true
case _ => false
}.toMap.mapValues {
case c: Convolution[Float] => c
}
private val _activators = _allLayers.map(_.activator)
private val _outputDim = _allLayers.last.neurons
private val _lastC = _convLayers.maxBy(_._1)._1
private val _lastL = _allLayers.indices.last
/**
* Computes output for `x`.
*/
def apply(x: Tensor): Vector = {
sink(x.matrix, _lastLayerIdx, batchSize = 1).toDenseVector
}
/**
* Computes output for given inputs `in`
* using efficient batch mode.
*/
def batchApply(xs: Tensors): Vectors = {
BatchBreeder.unsliceMatrixByRow {
sink(BatchBreeder.horzCatTensorBatch(xs), _lastLayerIdx, batchSize = xs.size)
}
}
/**
* `apply` under a focused layer.
*/
def focus[L <: Layer[Float]](l: L)(implicit cp: CanProduce[(Matrix, L), l.algebraicType]): Tensor => l.algebraicType = {
val lwi = layers.zipWithIndex
val idx = lwi.find(_._1 eq l).orElse {
val p = lwi.filter(_._1 == l)
if (p.size > 1) warn(s"Focus layer $l is ambiguous. Taking first. " +
"Alternatively, use a direct object reference to the desired layer.")
p.headOption
} match {
case Some((l, i)) => debug(s"Found focus layer $l at index $i."); i
case None => warn(s"Focus layer $l not found. Fallback to last layer."); _lastLayerIdx
}
(in: Tensor) => {
cp(sink(in.matrix, idx, batchSize = 1), l)
}
}
/**
* Trains this net with input `xs` against output `ys`.
*/
def train(xs: Tensors, ys: Vectors): Try[Run] = Try {
import settings._
val batchSize = settings.batchSize.getOrElse(xs.size)
require(xs.size == ys.size, s"Mismatch between sample sizes. (${xs.size} != ${ys.size})")
if (settings.verbose) {
if(xs.size % batchSize != 0) warn(s"Batches are not even. (${xs.size} % $batchSize = ${xs.size % batchSize} != 0)")
info(s"Training with ${xs.size} samples, batch size = $batchSize, batches = ${math.ceil(xs.size.toDouble / batchSize.toDouble).toInt}.")
info(s"Breeding batches ...")
}
val (xsys, batchSizes) = BatchBreeder.breedCNN(xs, ys, batchSize)
run(xsys, learningRate(1 -> 1.0f), batchSizes, precision, batch = 0, batches = xsys.size, iteration = 1, iterations, startTime = System.currentTimeMillis())
}
private def sink(x: Matrix, target: Int, batchSize: Int): Matrix = {
val r1 = flow(x, target, batchSize)
val r2 = if (target == _lastLayerIdx) lossFunction.sink(r1) else r1
r2
}
private def flow(in: Matrix, target: Int, batchSize: Int): Matrix = {
val _fa = ArrayBuffer.empty[Matrix]
val _fr = ArrayBuffer.empty[Matrix]
@tailrec def conv(_in: Matrix, i: Int): Unit = {
val l = _convLayers(i)
val p = weights(i) * convolute(_in, l, batchSize)
val a = p.map(_activators(i))
_fa += { if (i == _lastC) reshape_batch(a, l.dimOut, batchSize) else a }
_fr += a
if (i < _lastC) conv(a, i + 1)
}
@tailrec def fully(_in: Matrix, i: Int): Unit = {
val l = _allLayers(i)
val p = _in * weights(i)
val a = p.map(_activators(i))
_fa += a
_fr += a
if (i < _lastL) fully(a, i + 1)
}
conv(in, 0)
fully(_fa(_lastC), _lastC + 1)
_fr(target)
}
/**
* The training loop.
*/
@tailrec private def run(xsys: Seq[(Matrix, Matrix)], stepSize: Float, batchSizes: Map[Int, Int], precision: Double,
batch: Int, batches: Int, iteration: Int, maxIterations: Int, startTime: Long): Run = {
val batchSize = batchSizes(batch)
val (x, y) = (xsys(batch)._1, xsys(batch)._2)
val loss =
if (settings.approximation.isDefined) adaptWeightsApprox(x, y, stepSize, batchSize)
else adaptWeights(x, y, stepSize, batchSize)
val lossMean = mean(loss)
if (settings.verbose) info(f"Iteration $iteration.${batch + 1}, Avg. Loss = $lossMean%.6g, Vector: $loss")
maybeGraph(lossMean)
waypoint(NoOp)(iteration)
if (lossMean > precision && iteration < maxIterations) {
run(xsys, settings.learningRate(iteration + 1 -> stepSize), batchSizes,
precision, (batch + 1) % batches, batches, iteration + 1, maxIterations, startTime)
} else {
info(f"Took $iteration of $maxIterations iterations.")
Run(startTime, System.currentTimeMillis(), iteration)
}
}
/**
* Computes gradient for weights with respect to given batch,
* adapts their value using gradient descent and returns the loss matrix.
*/
private def adaptWeights(x: Matrix, y: Matrix, stepSize: Float, batchSize: Int): Matrix = {
import settings.updateRule
val loss = DenseMatrix.zeros[Float](batchSize, _outputDim)
val fa = collection.mutable.Map.empty[Int, Matrix]
val fb = collection.mutable.Map.empty[Int, Matrix]
val fc = collection.mutable.Map.empty[Int, Matrix]
val dws = collection.mutable.Map.empty[Int, Matrix]
val ds = collection.mutable.Map.empty[Int, Matrix]
@tailrec def conv(_in: Matrix, i: Int): Unit = {
val l = _convLayers(i)
val c = convolute(_in, l, batchSize)
val p = weights(i) * c
val a = p.map(_activators(i))
val b = p.map(_activators(i).derivative)
fa += i -> { if (i == _lastC) reshape_batch(a, l.dimOut, batchSize) else a }
fb += i -> b
fc += i -> c
if (i < _lastC) conv(a, i + 1)
}
@tailrec def fully(_in: Matrix, i: Int): Unit = {
val l = _allLayers(i)
val p = _in * weights(i)
val a = p.map(_activators(i))
val b = p.map(_activators(i).derivative)
fa += i -> a
fb += i -> b
if (i < _lastL) fully(a, i + 1)
}
@tailrec def derive(i: Int): Unit = {
if (i == _lastLayerIdx) {
val (err, grad) = lossFunction(y, fa(i))
val d = grad *:* fb(i)
val dw = fa(i - 1).t * d
dws += i -> dw
ds += i -> d
loss += err
derive(i - 1)
} else if (i < _lastLayerIdx && i > _lastC) {
val d = (ds(i + 1) * weights(i + 1).t) *:* fb(i)
val dw = fa(i - 1).t * d
dws += i -> dw
ds += i -> d
derive(i - 1)
} else if (i == _lastC) {
val l = _convLayers(i)
val d1 = ds(i + 1) * weights(i + 1).t
val d2 = reshape_batch_backprop(d1, l.dimOut, batchSize)
val d = d2 *:* fb(i)
val dw = d * fc(i).t
dws += i -> dw
ds += i -> d
if (i > 0) derive(i - 1)
} else {
val l = _convLayers(i + 1)
val ww = reshape_batch(weights(i + 1), (l.field._1, l.field._2, l.filters), l.dimIn._3)
val dc = convolute_backprop(ds(i + 1), l, batchSize)
val d = ww * dc *:* fb(i)
val dw = d * fc(i).t
dws += i -> dw
ds += i -> d
if (i > 0) derive(i - 1)
}
}
conv(x, 0)
fully(fa(_lastC), _lastC + 1)
derive(_lastLayerIdx)
(0 to _lastLayerIdx).foreach(i => updateRule(weights(i), dws(i), stepSize, i))
val lossReduced = (loss.t * DenseMatrix.ones[Float](loss.rows, 1)).t
lossReduced
}
/** For debugging, approximates the gradients using `settings.approximation`. */
private def adaptWeightsApprox(xs: Matrix, ys: Matrix, stepSize: Float, batchSize: Int): Matrix = {
require(settings.updateRule.isInstanceOf[Debuggable[Float]])
val _rule: Debuggable[Float] = settings.updateRule.asInstanceOf[Debuggable[Float]]
def lossFunc(): Matrix = {
val loss = lossFunction(ys, flow(xs, _lastLayerIdx, batchSize))._1
val reduced = (loss.t * DenseMatrix.ones[Float](loss.rows, 1)).t
reduced
}
val out = lossFunc()
def approximateGradient(weightLayer: Int, weight: (Int, Int)): Float = {
sum(settings.approximation.get.apply(weights, lossFunc, () => (), weightLayer, weight))
}
val updates = collection.mutable.HashMap.empty[(Int, (Int, Int)), Float]
val grads = collection.mutable.HashMap.empty[(Int, (Int, Int)), Float]
val debug = collection.mutable.HashMap.empty[Int, Matrix]
weights.zipWithIndex.foreach {
case (l, idx) =>
debug += idx -> l.copy
l.foreachPair { (k, v) =>
val grad = approximateGradient(idx, k)
updates += (idx, k) -> (v - (stepSize * grad))
grads += (idx, k) -> grad
}
}
updates.foreach {
case ((wl, k), v) =>
weights(wl).update(k, v)
}
grads.foreach {
case ((wl, k), v) =>
debug(wl).update(k, v)
}
_rule.lastGradients = debug
out
}
}
//</editor-fold>
| zenecture/neuroflow | core/src/main/scala/neuroflow/nets/cpu/ConvNetwork.scala | Scala | apache-2.0 | 20,369 |
/*
 * ViperVM
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * http://www.vipervm.org
 * GPLv3
 */
package org.vipervm.runtime.data
import org.vipervm.platform._
/**
* Contiguous raw data
*/
class RawData(size:Long) extends MetaView {
type ViewType = BufferView1D
def allocate(memory:MemoryNode):BufferView1D = {
val buffer = memory.allocate(size)
new BufferView1D(buffer, 0, size)
}
}
| hsyl20/Scala_ViperVM | src/main/scala/org/vipervm/runtime/data/RawData.scala | Scala | gpl-3.0 | 967 |
package de.uni_potsdam.hpi.coheel.programs
import de.uni_potsdam.hpi.coheel.Params
import de.uni_potsdam.hpi.coheel.programs.DataClasses.InputDocument
import de.uni_potsdam.hpi.coheel.util.Util
import de.uni_potsdam.hpi.coheel.wiki.TokenizerHelper
import org.apache.flink.api.common.functions.RichFlatMapFunction
import org.apache.flink.configuration.Configuration
import org.apache.flink.util.Collector
import scala.util.Random
class InputDocumentDistributorFlatMap(params: Params, nrDocuments: Int, runsOffline: Boolean) extends RichFlatMapFunction[String, InputDocument] {
import CoheelLogger._
var index: Int = -1
var random: Random = null
val parallelism = params.parallelism
log.info(s"Basing distribution on parallelism $parallelism")
val halfParallelism = if (CoheelProgram.runsOffline()) 1 else parallelism / 2
val firstHalf = if (runsOffline) List(0) else List.range(0, halfParallelism)
val secondHalf = if (runsOffline) List(0) else List.range(halfParallelism, parallelism)
var isFirstHalf: Boolean = true
override def open(params: Configuration): Unit = {
index = getRuntimeContext.getIndexOfThisSubtask
isFirstHalf = firstHalf contains index
random = new Random()
}
override def flatMap(text: String, out: Collector[InputDocument]): Unit = {
val tokenizerResult = TokenizerHelper.tokenizeWithStemmedAndUnstemmedAndTags(text)
val id = s"$nrDocuments-${Util.id(text)}"
log.info(s"Reading document $id on index $index")
val tokensStemmed = tokenizerResult.tokensStemmed
// TODO: Temporarily changed to work on old data
val tokensUnstemmed = tokenizerResult.tokensStemmed
val tags = tokenizerResult.tags
if (isFirstHalf) {
out.collect(InputDocument(id, 0, index, tokensStemmed, tokensUnstemmed, tags))
if (!CoheelProgram.runsOffline()) {
val randomIndex = secondHalf(random.nextInt(halfParallelism))
out.collect(InputDocument(id, 1, randomIndex, tokensStemmed, tokensUnstemmed, tags))
log.info(s"Distributing to $index and $randomIndex")
}
} else {
if (!CoheelProgram.runsOffline()) {
val randomIndex = firstHalf(random.nextInt(halfParallelism))
out.collect(InputDocument(id, 0, randomIndex, tokensStemmed, tokensUnstemmed, tags))
log.info(s"Distributing to $index and $randomIndex")
}
out.collect(InputDocument(id, 1, index, tokensStemmed, tokensUnstemmed, tags))
}
}
}
| stratosphere/coheel | src/main/scala/de/uni_potsdam/hpi/coheel/programs/InputDocumentDistributorFlatMap.scala | Scala | apache-2.0 | 2,370 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions.aggregate
import java.nio.ByteBuffer
import com.google.common.primitives.{Doubles, Ints, Longs}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult.{TypeCheckFailure, TypeCheckSuccess}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.ApproximatePercentile.PercentileDigest
import org.apache.spark.sql.catalyst.util.{ArrayData, GenericArrayData}
import org.apache.spark.sql.catalyst.util.QuantileSummaries
import org.apache.spark.sql.catalyst.util.QuantileSummaries.{defaultCompressThreshold, Stats}
import org.apache.spark.sql.types._
/**
* The ApproximatePercentile function returns the approximate percentile(s) of a column at the given
* percentage(s). A percentile is a watermark value below which a given percentage of the column
* values fall. For example, the percentile of column `col` at percentage 50% is the median of
* column `col`.
*
* This function supports partial aggregation.
*
* @param child child expression that can produce column value with `child.eval(inputRow)`
* @param percentageExpression Expression that represents a single percentage value or
* an array of percentage values. Each percentage value must be between
* 0.0 and 1.0.
* @param accuracyExpression Integer literal expression of approximation accuracy. Higher value
* yields better accuracy, the default value is
* DEFAULT_PERCENTILE_ACCURACY.
*/
@ExpressionDescription(
usage = """
_FUNC_(col, percentage [, accuracy]) - Returns the approximate percentile value of numeric
column `col` at the given percentage. The value of percentage must be between 0.0
and 1.0. The `accuracy` parameter (default: 10000) is a positive numeric literal which
controls approximation accuracy at the cost of memory. Higher value of `accuracy` yields
better accuracy, `1.0/accuracy` is the relative error of the approximation.
When `percentage` is an array, each value of the percentage array must be between 0.0 and 1.0.
In this case, returns the approximate percentile array of column `col` at the given
percentage array.
""",
examples = """
Examples:
> SELECT _FUNC_(10.0, array(0.5, 0.4, 0.1), 100);
[10.0,10.0,10.0]
> SELECT _FUNC_(10.0, 0.5, 100);
10.0
""")
case class ApproximatePercentile(
child: Expression,
percentageExpression: Expression,
accuracyExpression: Expression,
override val mutableAggBufferOffset: Int,
override val inputAggBufferOffset: Int)
extends TypedImperativeAggregate[PercentileDigest] with ImplicitCastInputTypes {
def this(child: Expression, percentageExpression: Expression, accuracyExpression: Expression) = {
this(child, percentageExpression, accuracyExpression, 0, 0)
}
def this(child: Expression, percentageExpression: Expression) = {
this(child, percentageExpression, Literal(ApproximatePercentile.DEFAULT_PERCENTILE_ACCURACY))
}
// Mark as lazy so that accuracyExpression is not evaluated during tree transformation.
private lazy val accuracy: Int = accuracyExpression.eval().asInstanceOf[Int]
override def inputTypes: Seq[AbstractDataType] = {
// Support NumericType, DateType and TimestampType since their internal types are all numeric,
// and can be easily cast to double for processing.
Seq(TypeCollection(NumericType, DateType, TimestampType),
TypeCollection(DoubleType, ArrayType(DoubleType)), IntegerType)
}
// Mark as lazy so that percentageExpression is not evaluated during tree transformation.
private lazy val (returnPercentileArray: Boolean, percentages: Array[Double]) =
percentageExpression.eval() match {
// Rule ImplicitTypeCasts can cast other numeric types to double
case num: Double => (false, Array(num))
case arrayData: ArrayData => (true, arrayData.toDoubleArray())
}
override def checkInputDataTypes(): TypeCheckResult = {
val defaultCheck = super.checkInputDataTypes()
if (defaultCheck.isFailure) {
defaultCheck
} else if (!percentageExpression.foldable || !accuracyExpression.foldable) {
TypeCheckFailure(s"The accuracy or percentage provided must be a constant literal")
} else if (accuracy <= 0) {
TypeCheckFailure(
s"The accuracy provided must be a positive integer literal (current value = $accuracy)")
} else if (percentages.exists(percentage => percentage < 0.0D || percentage > 1.0D)) {
TypeCheckFailure(
s"All percentage values must be between 0.0 and 1.0 " +
s"(current = ${percentages.mkString(", ")})")
} else {
TypeCheckSuccess
}
}
override def createAggregationBuffer(): PercentileDigest = {
val relativeError = 1.0D / accuracy
new PercentileDigest(relativeError)
}
override def update(buffer: PercentileDigest, inputRow: InternalRow): PercentileDigest = {
val value = child.eval(inputRow)
// Ignore empty rows, for example: percentile_approx(null)
if (value != null) {
// Convert the value to a double value
val doubleValue = child.dataType match {
case DateType => value.asInstanceOf[Int].toDouble
case TimestampType => value.asInstanceOf[Long].toDouble
case n: NumericType => n.numeric.toDouble(value.asInstanceOf[n.InternalType])
case other: DataType =>
throw new UnsupportedOperationException(s"Unexpected data type ${other.catalogString}")
}
buffer.add(doubleValue)
}
buffer
}
override def merge(buffer: PercentileDigest, other: PercentileDigest): PercentileDigest = {
buffer.merge(other)
buffer
}
override def eval(buffer: PercentileDigest): Any = {
val doubleResult = buffer.getPercentiles(percentages)
val result = child.dataType match {
case DateType => doubleResult.map(_.toInt)
case TimestampType => doubleResult.map(_.toLong)
case ByteType => doubleResult.map(_.toByte)
case ShortType => doubleResult.map(_.toShort)
case IntegerType => doubleResult.map(_.toInt)
case LongType => doubleResult.map(_.toLong)
case FloatType => doubleResult.map(_.toFloat)
case DoubleType => doubleResult
case _: DecimalType => doubleResult.map(Decimal(_))
case other: DataType =>
throw new UnsupportedOperationException(s"Unexpected data type ${other.catalogString}")
}
if (result.length == 0) {
null
} else if (returnPercentileArray) {
new GenericArrayData(result)
} else {
result(0)
}
}
override def withNewMutableAggBufferOffset(newOffset: Int): ApproximatePercentile =
copy(mutableAggBufferOffset = newOffset)
override def withNewInputAggBufferOffset(newOffset: Int): ApproximatePercentile =
copy(inputAggBufferOffset = newOffset)
override def children: Seq[Expression] = Seq(child, percentageExpression, accuracyExpression)
// Returns null for empty inputs
override def nullable: Boolean = true
// The result type is the same as the input type.
override def dataType: DataType = {
if (returnPercentileArray) ArrayType(child.dataType, false) else child.dataType
}
override def prettyName: String = "percentile_approx"
override def serialize(obj: PercentileDigest): Array[Byte] = {
ApproximatePercentile.serializer.serialize(obj)
}
override def deserialize(bytes: Array[Byte]): PercentileDigest = {
ApproximatePercentile.serializer.deserialize(bytes)
}
}
object ApproximatePercentile {
// Default accuracy of Percentile approximation. Larger value means better accuracy.
// The default relative error can be deduced by defaultError = 1.0 / DEFAULT_PERCENTILE_ACCURACY
val DEFAULT_PERCENTILE_ACCURACY: Int = 10000
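// For example, the default accuracy of 10000 corresponds to a relative error of 1.0 / 10000 = 1e-4.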
/**
* PercentileDigest is a probabilistic data structure used for approximating percentiles
* with limited memory. PercentileDigest is backed by [[QuantileSummaries]].
*
* @param summaries underlying probabilistic data structure [[QuantileSummaries]].
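*
* A typical flow looks like the following sketch (`inputs` is an assumed collection of doubles):
*
* {{{
*   val digest = new PercentileDigest(relativeError = 0.0001)
*   inputs.foreach(digest.add)
*   val Array(median) = digest.getPercentiles(Array(0.5))
* }}}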
*/
class PercentileDigest(private var summaries: QuantileSummaries) {
def this(relativeError: Double) = {
this(new QuantileSummaries(defaultCompressThreshold, relativeError, compressed = true))
}
private[sql] def isCompressed: Boolean = summaries.compressed
/** Returns compressed object of [[QuantileSummaries]] */
def quantileSummaries: QuantileSummaries = {
if (!isCompressed) compress()
summaries
}
/** Insert an observation value into the PercentileDigest data structure. */
def add(value: Double): Unit = {
summaries = summaries.insert(value)
}
/** In-place merges in another PercentileDigest. */
def merge(other: PercentileDigest): Unit = {
if (!isCompressed) compress()
summaries = summaries.merge(other.quantileSummaries)
}
/**
* Returns the approximate percentiles of all observation values at the given percentages.
* A percentile is a watermark value below which a given percentage of observation values fall.
* For example, the following code returns the 25th, median, and 75th percentiles of
* all observation values:
*
* {{{
* val Array(p25, median, p75) = percentileDigest.getPercentiles(Array(0.25, 0.5, 0.75))
* }}}
*/
def getPercentiles(percentages: Array[Double]): Array[Double] = {
if (!isCompressed) compress()
if (summaries.count == 0 || percentages.length == 0) {
Array.empty[Double]
} else {
val result = new Array[Double](percentages.length)
var i = 0
while (i < percentages.length) {
// Since summaries.count != 0, the query here never returns None.
result(i) = summaries.query(percentages(i)).get
i += 1
}
result
}
}
private final def compress(): Unit = {
summaries = summaries.compress()
}
}
/**
* Serializer for class [[PercentileDigest]]
*
* This class is thread safe.
*/
class PercentileDigestSerializer {
private final def length(summaries: QuantileSummaries): Int = {
// summaries.compressThreshold, summary.relativeError, summary.count
Ints.BYTES + Doubles.BYTES + Longs.BYTES +
// length of summary.sampled
Ints.BYTES +
// summary.sampled, Array[Stat(value: Double, g: Long, delta: Long)]
summaries.sampled.length * (Doubles.BYTES + Longs.BYTES + Longs.BYTES)
}
final def serialize(obj: PercentileDigest): Array[Byte] = {
val summary = obj.quantileSummaries
val buffer = ByteBuffer.wrap(new Array(length(summary)))
buffer.putInt(summary.compressThreshold)
buffer.putDouble(summary.relativeError)
buffer.putLong(summary.count)
buffer.putInt(summary.sampled.length)
var i = 0
while (i < summary.sampled.length) {
val stat = summary.sampled(i)
buffer.putDouble(stat.value)
buffer.putLong(stat.g)
buffer.putLong(stat.delta)
i += 1
}
buffer.array()
}
final def deserialize(bytes: Array[Byte]): PercentileDigest = {
val buffer = ByteBuffer.wrap(bytes)
val compressThreshold = buffer.getInt()
val relativeError = buffer.getDouble()
val count = buffer.getLong()
val sampledLength = buffer.getInt()
val sampled = new Array[Stats](sampledLength)
var i = 0
while (i < sampledLength) {
val value = buffer.getDouble()
val g = buffer.getLong()
val delta = buffer.getLong()
sampled(i) = Stats(value, g, delta)
i += 1
}
val summary = new QuantileSummaries(compressThreshold, relativeError, sampled, count, true)
new PercentileDigest(summary)
}
}
val serializer: PercentileDigestSerializer = new PercentileDigestSerializer
}
|
sahilTakiar/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/ApproximatePercentile.scala
|
Scala
|
apache-2.0
| 12,863 |
package org.qirx.cms.execution
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import org.qirx.cms.construction.GetMessages
import org.qirx.cms.construction.Metadata
import org.qirx.cms.construction.Store
import org.qirx.cms.construction.Validate
import org.qirx.cms.machinery.BuildTools
import org.qirx.cms.machinery.ExecutionTools
import org.qirx.cms.machinery.Id
import org.qirx.cms.machinery.ProgramType
import org.qirx.cms.machinery.~>
import org.qirx.cms.metadata.DocumentMetadata
import play.api.libs.json.JsObject
import org.qirx.cms.construction.GetMetadata
/**
* Validates all documents and returns the results as a sequence of
* (document, metadata, validation result) tuples.
*/
class DocumentValidator(
metadata: Metadata ~> Id,
store: Store ~> Future)(implicit ec: ExecutionContext) {
import BuildTools._
import ExecutionTools._
type Document = JsObject
type ValidationResult = Seq[JsObject]
type Result = (Document, DocumentMetadata, ValidationResult)
def validate():Future[Seq[Result]] = validationProgram executeWith runner
private type Elements = ProgramType[(System + Store + Metadata + Seq)#T]
private def validationProgram(implicit e: Elements) =
for {
documentMetadata <- GetMetadata
meta <- documentMetadata.asProgram
messages <- GetMessages(meta)
documents <- Store.List(meta.id)
document <- documents.asProgram
result <- Validate(meta, document, messages)
} yield (document, meta, result)
private val runner = {
val seqRunner = SeqToFutureSeq
val metadataRunner = metadata andThen IdToFuture andThen FutureToFutureSeq
val storeRunner = store andThen FutureToFutureSeq
val systemRunner = SystemToId andThen IdToFuture andThen FutureToFutureSeq
seqRunner or metadataRunner or storeRunner or systemRunner
}
}
|
EECOLOR/play-cms
|
cms/src/main/scala/org/qirx/cms/execution/DocumentValidator.scala
|
Scala
|
mit
| 1,836 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.action.sse.fsm
import io.gatling.core.action.Action
import io.gatling.http.check.sse.SseMessageCheckSequence
final case class SetCheck(actionName: String, checkSequences: List[SseMessageCheckSequence], next: Action)
|
gatling/gatling
|
gatling-http/src/main/scala/io/gatling/http/action/sse/fsm/SetCheck.scala
|
Scala
|
apache-2.0
| 861 |
/*
* Copyright (c) 2015, streamdrill UG (haftungsbeschränkt)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package streamdrill.core
import java.io.{DataInputStream, DataOutputStream}
import java.util.Comparator
import streamdrill.io.Serializer
import streamdrill.logging.Logging
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
/**
* Trait for an ExpDecayTrend. Adds update & remove capabilities.
*
* @author Mikio L. Braun
*/
trait AbstractExpDecayTrend[T] extends AbstractReadOnlyTrend[T] {
def update(t: T, timestamp: Long, value: Double)
def update(t: T, timestamp: Long) {
update(t, timestamp, 1.0)
}
def remove(t: T)
}
/**
* Entry for an ExpDecayTrend is a counter together with a key.
*
* @author Mikio L. Braun
*/
class ExpDecayEntry[T](val key: T, _count: Long, _timestamp: Long) extends ExpDecayCounter(_count, _timestamp) {
var history: Array[Double] = null
override def toString = "ExpDecayEntry(%s, %s, %d)".format(key, ExpDecay.dump(count), timestamp)
}
/**
* ExpDecayTrend
*
* Creates a trend of a given maximum capacity where the counters decay with the given half-time (in milliseconds).
* The trend will have the given maximum capacity of elements at most and replace older entries to make room based
* on the ones with the smallest activity.
*
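* A minimal usage sketch (key, capacity and half-time values are illustrative only):
*
* {{{
*   val trend = new ExpDecayTrend[String](capacity = 1000, halfTimeInMs = 3600 * 1000L)
*   trend.update("some-key", System.currentTimeMillis())
*   val top10 = trend.queryWithScore(count = 10)
* }}}
*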
* @author Mikio L. Braun
*/
class ExpDecayTrend[T](val capacity: Int, val halfTimeInMs: Long)(implicit ord: Ordering[T])
extends AbstractExpDecayTrend[T]
with Logging
with HasLocks {
private val eds = new ExpDecaySpace(halfTimeInMs)
private val counters = new java.util.TreeMap[T, ExpDecayEntry[T]](new ComparatorFct((t1: T, t2: T) => ord.compare(t1, t2)))
var verbose = false
var verboseCompare = false
var iteration = 0
var replaceOnUpdate = false
var scoreThreshold = 0.0
/**
* Compare function used for the index.
*/
private val counterCmp = new Comparator[ExpDecayEntry[T]] {
def compare(e1: ExpDecayEntry[T], e2: ExpDecayEntry[T]): Int = {
val c = eds.compare(e1, e2)
val result = if (c != 0) {
-c
} else {
math.signum(ord.compare(e1.key, e2.key))
}
if (verboseCompare) {
info("compare %s and %s => %d".format(e1, e2, result))
}
result
}
}
private val index = new java.util.TreeSet[ExpDecayEntry[T]](counterCmp)
def getCounter(t: T) = counters.get(t)
def counterIterator = index.iterator.asScala
/*
* Event Listeners
*/
private var eventListeners: List[TrendEventListener[T]] = Nil
def addEventListener(tel: TrendEventListener[T]) {
eventListeners ::= tel
}
def removeEventListener(tel: TrendEventListener[T]) {
eventListeners = eventListeners.filterNot(_ == tel)
}
def notifyAddEvent(t: T) {
eventListeners.foreach(_.onAdd(t))
}
def notifyRemoveEvent(t: T) {
eventListeners.foreach(_.onRemove(t))
}
/*
* History stuff
*/
private var historyLength = 0
private var historyIndex = 0
def enableHistory(length: Int) = alone {
if (length != historyLength) {
for (c <- counters.values.iterator.asScala) {
c.history = new Array[Double](length)
}
historyLength = length
historyIndex = 0
}
}
def disableHistory() = alone {
for (c <- counters.values.iterator.asScala) {
c.history = null
}
historyLength = 0
historyIndex = 0
}
def history(t: T): Array[Double] = together {
val result = new Array[Double](historyLength)
val c = getCounter(t)
if (c != null) {
for (i <- 0 until historyLength) {
result(i) = c.history((historyIndex + i) % historyLength)
}
}
result
}
def updateHistory() = alone {
if (historyLength != 0) {
val t = lastUpdate
for (c <- counters.values.iterator.asScala) {
c.history(historyIndex) = eds.scale(c, t)
}
historyIndex = (historyIndex + 1) % historyLength
}
}
/**
* Number of elements in the trend.
*
* Always less than or equal to the capacity.
*/
def size = together {
counters.size
}
/**
* Time of the last update.
*/
var lastUpdate = 0L
private def removeLast() {
val last = index.last()
removeEntryFromIndices(last)
counters.remove(last.key)
}
/**
* Get smallest entry in the trend.
*
* @return smallest entry in the trend
*/
def smallest: T = together {
index.last().key
}
/**
* Remove an entry from the trend.
*
* @param t the element to remove
*/
def remove(t: T) = alone {
val c = counters.get(t)
if (c != null) {
removeEntry(c)
}
}
/**
* Called after an entry has been added
*
* @param c the entry to be added
*/
protected def addEntryToIndices(c: ExpDecayEntry[T]) {
index.add(c)
indices.foreach(kv => kv._2.addEvent(c))
notifyAddEvent(c.key)
}
protected def removeEntry(e: ExpDecayEntry[T]) {
removeEntryFromIndices(e)
counters.remove(e.key)
}
/**
* Called before entry is removed
*
* @param c the entry to be removed
*/
protected def removeEntryFromIndices(c: ExpDecayEntry[T]) {
notifyRemoveEvent(c.key)
index.remove(c)
indices.foreach(_._2.removeEvent(c))
}
/**
* Called after each update.
*/
protected def postUpdate() {}
def update(t: T, timestamp: Long, increment: Double) {
alone {
iteration += 1
try {
val newLastUpdate = if (timestamp > lastUpdate) {
timestamp
} else {
lastUpdate
}
val c = counters.get(t)
if (c == null) {
while (counters.size >= capacity) {
removeLast()
}
val c = new ExpDecayEntry(t, eds.make(increment), timestamp)
counters.put(t, c)
addEntryToIndices(c)
if (historyLength != 0)
c.history = new Array[Double](historyLength)
} else {
if (!index.contains(c)) {
println("Hm, how odd. Let's turn on the verbosity and check again... .")
verboseCompare = true
index.contains(c)
verboseCompare = false
}
removeEntryFromIndices(c)
if (!replaceOnUpdate) {
eds.updateToLater(c, timestamp, increment)
} else {
eds.set(c, timestamp, increment)
}
addEntryToIndices(c)
if (counters.size != index.size) {
error(("number of counters (=%d) and index differ (=%d). Which is very bad!\n" +
"This happened while processing %s, which is not the problem probably").format(counters.size, index.size, t))
throw new IllegalStateException("Dataset is corrupt. This shouldn't happen. Please file a bug report.")
}
}
lastUpdate = newLastUpdate
if (scoreThreshold != 0.0) {
while (eds.scale(index.last(), lastUpdate) <= scoreThreshold) {
removeLast()
}
}
postUpdate()
} catch {
case ex: NoSuchElementException =>
throw new NoSuchElementException("when inserting %s: %s".format(t, ex.getMessage))
}
}
}
/**
* Return last update for given element
*
* @param t the element to be queried
* @return either the timestamp in milliseconds since the epoch, or -1L if the element wasn't in the set.
*/
def lastUpdate(t: T): Long = together {
val ti = getCounter(t)
if (ti == null)
-1L
else
ti.timestamp
}
/**
* Get the score
*
* @param t the element to get a score for
* @param timestamp a timestamp to calculate the score
* @return the score
*/
def score(t: T, timestamp: Long): Double = together {
val c = counters.get(t)
if (c == null)
0.0
else {
eds.scale(c, timestamp)
}
}
/**
* Query the trend.
*
* @param count how many elements to retrieve
* @param offset where to start from the top (Warning: actively skips elements!)
* @return a list of keys.
*/
def query(count: Int, offset: Int = 0): Seq[T] = together {
query().drop(offset).take(count).toIndexedSeq
}
/**
* Query the trend, but return an iterator
*
* @return
*/
private def query(): Iterator[T] = index.iterator.asScala.map(_.key)
/**
* Query the trend (also return the counters)
*
* @param count how many elements to return
* @param offset the offset of the returned element list
* @param timestamp a timestamp for the returned counters
* @return a list of elements together with counters
*/
def queryWithScore(count: Int, offset: Int = 0, timestamp: Long = 0L): Seq[(T, Double)] = together {
val ts = if (timestamp == 0L) lastUpdate else timestamp
val it = index.iterator()
var o = 0
var c = 0
val result = new ArrayBuffer[(T, Double)]()
while (it.hasNext && c < count) {
val counter = it.next()
if (o >= offset) {
result.append((counter.key, eds.scale(counter, ts)))
c += 1
} else {
o += 1
}
}
result.result()
}
def contains(t: T): Boolean = together {
counters.containsKey(t)
}
private var indices: Map[String, SecondaryIndex[ExpDecayEntry[T], _]] = Map()
var indexNames: Seq[String] = Seq()
def addIndex[Index](name: String, indexMap: IndexMap[T, Index]) {
val newIndex = new SecondaryIndex[ExpDecayEntry[T], Index](new IndexMap[ExpDecayEntry[T], Index]() {
def project(b: ExpDecayEntry[T]): Index = indexMap.project(b.key)
val comparator: Comparator[Index] = indexMap.comparator
}, counterCmp)
alone {
// Add all the data we already have
val it = index.iterator
while (it.hasNext) {
newIndex.addEvent(it.next())
}
// add index
indices += name -> newIndex
indexNames = indexNames :+ name
}
}
def removeIndex(name: String) {
indices -= name
indexNames = indexNames.filterNot(_ == name)
}
def queryIndex[Index](name: String, value: Index, count: Int, offset: Int): Seq[T] = together {
val i = indices(name).asInstanceOf[SecondaryIndex[ExpDecayEntry[T], Index]]
i.query(value, count, offset).map(_.key).toIndexedSeq
}
def queryIndex[Index](name: String, value: Index): Iterator[T] = together {
val i = indices(name).asInstanceOf[SecondaryIndex[ExpDecayEntry[T], Index]]
i.query(value).map(_.key)
}
def queryIndexWithScore[Index](name: String, value: Index, count: Int, offset: Int): Seq[(T, Double)] = together {
val es = queryIndex(name, value, count, offset)
es.map(e => (e, score(e))).toIndexedSeq
}
def save(out: DataOutputStream)(implicit serl: Serializer[T]) = together {
out.writeLong(lastUpdate)
if (historyLength != 0) {
out.writeInt(historyIndex)
}
// The actual data
out.writeInt(counters.size)
val it = counters.entrySet.iterator()
while (it.hasNext) {
val entry = it.next().getValue
//println("Saving out " + entry)
out.writeLong(entry.count)
out.writeLong(entry.timestamp)
serl.write(out, entry.key)
if (historyLength != 0) {
for (i <- 0 until historyLength)
out.writeDouble(entry.history(i))
}
}
}
def load(in: DataInputStream)(implicit serl: Serializer[T]): Unit = alone {
lastUpdate = in.readLong()
if (historyLength != 0) {
historyIndex = in.readInt()
}
val numEntries = in.readInt()
counters.clear()
for (i <- 0 until numEntries) {
val count = in.readLong()
val timestamp = in.readLong()
val key = serl.read(in)
val c = new ExpDecayEntry(key, count, timestamp)
//println("Read in " + c)
if (historyLength != 0) {
val h = new Array[Double](historyLength)
for (i <- 0 until historyLength) {
h(i) = in.readDouble()
}
c.history = h
}
counters.put(key, c)
addEntryToIndices(c)
}
}
def serialize(in: DataInputStream)(implicit serl: Serializer[T]) = load(in)
def serialize(out: DataOutputStream)(implicit serl: Serializer[T]) = save(out)
def discardEntriesAfter(timestamp: Long) = alone {
val batchSize = 10000
var entries = counters.values().asScala.filter(_.timestamp > timestamp).take(batchSize)
while (entries.nonEmpty) {
// debug("Removing batch of %d entries... ".format(entries.size))
entries.foreach(removeEntry)
entries = counters.values().asScala.filter(_.timestamp > timestamp).take(batchSize)
}
}
}
|
streamdrill/streamdrill-core
|
src/main/scala/streamdrill/core/ExpDecayTrend.scala
|
Scala
|
bsd-2-clause
| 13,838 |
package com.github.jparkie.spark.cassandra
import com.datastax.spark.connector.mapper.ColumnMapper
import org.apache.spark.rdd.RDD
import scala.reflect.runtime.universe._
package object rdd {
/**
* Implicitly lift a [[RDD]] with [[SparkCassRDDFunctions]].
*
* @param rdd A [[RDD]] to lift.
* @return Enriched [[RDD]] with [[SparkCassRDDFunctions]].
*/
implicit def sparkCassRDDFunctions[T: ColumnMapper: TypeTag](rdd: RDD[T]): SparkCassRDDFunctions[T] = {
new SparkCassRDDFunctions[T](rdd)
}
}
|
jparkie/Spark2Cassandra
|
src/main/scala/com/github/jparkie/spark/cassandra/rdd/package.scala
|
Scala
|
apache-2.0
| 522 |
/**
* Copyright (C) 2013 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.portlet.liferay
import collection.JavaConverters._
import com.liferay.portal.kernel.language.LanguageUtil
import com.liferay.portal.model.User
import javax.portlet._
import org.orbeon.oxf.util.ScalaUtils._
object LiferaySupport {
private val HeaderNamesGetters = List[(String, User ⇒ List[String])](
"Orbeon-Liferay-User-Id" → (u ⇒ Option(u.getUserId) map (_.toString) toList),
"Orbeon-Liferay-User-Screen-Name" → (u ⇒ Option(u.getScreenName).toList),
"Orbeon-Liferay-User-Full-Name" → (u ⇒ Option(u.getFullName).toList),
"Orbeon-Liferay-User-Email" → (u ⇒ Option(u.getEmailAddress).toList),
"Orbeon-Liferay-User-Group-Id" → (u ⇒ Option(u.getGroup) map (_.getGroupId.toString) toList),
"Orbeon-Liferay-User-Group-Name" → (u ⇒ Option(u.getGroup) map (_.getName) toList),
"Orbeon-Liferay-User-Roles" → (u ⇒ u.getRoles.asScala map (_.getName) toList)
)
// Return Liferay user, group and role information as headers. There can be multiple role headers.
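// For illustration (values made up): a user with two roles yields, among others,
// ("Orbeon-Liferay-User-Screen-Name" → "jdoe") and two "Orbeon-Liferay-User-Roles" pairs.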
def userHeaders(user: User): List[(String, String)] =
for {
(name, getter) ← HeaderNamesGetters
value ← getter(user)
} yield
name → value
def languageHeader(request: PortletRequest) =
nonEmptyOrNone(LanguageUtil.getLanguageId(request)) map ("Orbeon-Liferay-Language" →)
}
|
martinluther/orbeon-forms
|
src/main/scala/org/orbeon/oxf/portlet/liferay/LiferaySupport.scala
|
Scala
|
lgpl-2.1
| 2,106 |
package io.eels.util
import org.apache.hadoop.fs.{FileSystem, Path}
object HdfsMkdir {
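/**
* Creates the given path if it does not exist yet, optionally copying onto it the permissions
* of the nearest existing ancestor. Illustrative call (path is assumed, an implicit FileSystem
* must be in scope):
*
* {{{
*   HdfsMkdir(new Path("/data/out"), inheritPermissionsDefault = true)
* }}}
*/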
def apply(path: Path, inheritPermissionsDefault: Boolean)(implicit fs: FileSystem): Unit = {
if (!fs.exists(path)) {
// iterate through the parents until we hit a parent that exists, then take that, which will give
// us the first folder that exists
val parent = Iterator.iterate(path)(_.getParent).dropWhile(false == fs.exists(_)).take(1).toList.head
// using the folder that exists, get its permissions
val permission = fs.getFileStatus(parent).getPermission
fs.create(path, false)
if (inheritPermissionsDefault)
fs.setPermission(path, permission)
}
}
}
|
eel-lib/eel
|
eel-core/src/main/scala/io/eels/util/HdfsMkdir.scala
|
Scala
|
mit
| 706 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.testing
import com.spotify.scio.coders.{Coder, CoderMaterializer}
import com.spotify.scio.streaming.ACCUMULATING_FIRED_PANES
import com.spotify.scio.values.WindowOptions
import org.apache.beam.sdk.Pipeline.PipelineExecutionException
import org.apache.beam.sdk.transforms.windowing.{
AfterProcessingTime,
AfterWatermark,
IntervalWindow,
Repeatedly
}
import org.apache.beam.sdk.values.TimestampedValue
import org.joda.time.{Duration, Instant}
import java.io.ObjectOutputStream
import scala.util.Try
import java.io.ObjectInputStream
import java.io.IOException
import java.io.NotSerializableException
import cats.kernel.Eq
import com.twitter.chill.Externalizer
import com.esotericsoftware.kryo.KryoSerializable
import com.esotericsoftware.kryo.Kryo
import com.esotericsoftware.kryo.io.{Input, Output}
import scala.annotation.unused
object SCollectionMatchersTest {
// intentionally not serializable to test lambda ser/de
class TestRecord(val x: Int) {
override def hashCode(): Int = x
override def equals(obj: Any): Boolean =
obj.isInstanceOf[TestRecord] && x == obj.asInstanceOf[TestRecord].x
}
}
final case class DoesNotSerialize[B](a: String, b: B) extends KryoSerializable with Serializable {
@throws(classOf[IOException])
private def writeObject(@unused o: ObjectOutputStream): Unit =
throw new NotSerializableException("DoesNotSerialize can't be serialized")
@throws(classOf[IOException])
private def readObject(@unused o: ObjectInputStream): Unit =
throw new NotSerializableException("DoesNotSerialize can't be serialized")
@throws(classOf[IOException])
def write(kryo: Kryo, output: Output): Unit =
throw new NotSerializableException("DoesNotSerialize can't be serialized")
@throws(classOf[IOException])
def read(kryo: Kryo, input: Input): Unit =
throw new NotSerializableException("DoesNotSerialize can't be serialized")
}
class SCollectionMatchersTest extends PipelineSpec {
import SCollectionMatchersTest.TestRecord
implicit val coder: Coder[TestRecord] = Coder.kryo[TestRecord]
private def newTR(x: Int) = new TestRecord(x)
"SCollectionMatchers" should "support containInAnyOrder" in {
// should cases
runWithContext {
_.parallelize(1 to 100) should containInAnyOrder(1 to 100)
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(1 to 200) should containInAnyOrder(1 to 100)
}
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(1 to 100) should containInAnyOrder(1 to 200)
}
}
// shouldNot cases
runWithContext {
_.parallelize(1 to 100) shouldNot containInAnyOrder(1 to 10)
}
runWithContext {
_.parallelize(1 to 10) shouldNot containInAnyOrder(1 to 100)
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(1 to 100) shouldNot containInAnyOrder(1 to 100)
}
}
// lambda ser/de
runWithContext(_.parallelize(Seq(newTR(1))) should containInAnyOrder(Seq(newTR(1))))
runWithContext(_.parallelize(Seq(newTR(1))) shouldNot containInAnyOrder(Seq(newTR(2))))
}
it should "support containsInAnyOrder containing %" in {
val assertionError = intercept[AssertionError] {
runWithContext {
_.parallelize(Seq("1")) should containInAnyOrder(Seq("%1"))
}
}
// expected value
assertionError.getMessage should include("\"%1\"")
// actual value
assertionError.getMessage should include("<\"1\">")
}
it should "support containSingleValue" in {
// should cases
runWithContext(_.parallelize(Seq(1)) should containSingleValue(1))
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(Seq(1)) should containSingleValue(10))
}
a[PipelineExecutionException] should be thrownBy {
runWithContext(_.parallelize(1 to 10) should containSingleValue(1))
}
a[PipelineExecutionException] should be thrownBy {
runWithContext {
_.parallelize(Seq.empty[Int]) should containSingleValue(1)
}
}
// shouldNot cases
runWithContext(_.parallelize(Seq(10)) shouldNot containSingleValue(1))
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(Seq(1)) shouldNot containSingleValue(1))
}
a[PipelineExecutionException] should be thrownBy {
runWithContext(_.parallelize(1 to 10) shouldNot containSingleValue(1))
}
a[PipelineExecutionException] should be thrownBy {
runWithContext {
_.parallelize(Seq.empty[Int]) shouldNot containSingleValue(1)
}
}
// lambda ser/de
runWithContext(_.parallelize(Seq(newTR(1))) should containSingleValue(newTR(1)))
runWithContext(_.parallelize(Seq(newTR(1))) shouldNot containSingleValue(newTR(2)))
}
it should "support containValue" in {
// should cases
runWithContext(_.parallelize(Seq(1, 2, 3)) should containValue(1))
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(Seq(1)) should containValue(10))
}
// shouldNot cases
runWithContext(_.parallelize(Seq(1, 2, 3)) shouldNot containValue(4))
runWithContext(_.parallelize(Seq(1, 2, 3)) should not(containValue(4)))
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(Seq(1, 2, 3)) shouldNot containValue(1))
}
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(Seq(1, 2, 3)) should not(containValue(1)))
}
// lambda ser/de
runWithContext(_.parallelize(Seq(newTR(1), newTR(2))) should containValue(newTR(1)))
runWithContext(_.parallelize(Seq(newTR(1), newTR(2))) shouldNot containValue(newTR(3)))
}
it should "support beEmpty" in {
// should cases
runWithContext(_.parallelize(Seq.empty[Int]) should beEmpty)
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(1 to 10) should beEmpty)
}
// shouldNot cases
runWithContext(_.parallelize(1 to 10) shouldNot beEmpty)
runWithContext(_.parallelize(1 to 10) should not(beEmpty))
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(Seq.empty[Int]) shouldNot beEmpty)
}
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(Seq.empty[Int]) should not(beEmpty))
}
}
it should "support haveSize" in {
// should cases
runWithContext(_.parallelize(Seq.empty[Int]) should haveSize(0))
runWithContext(_.parallelize(Seq(1)) should haveSize(1))
runWithContext(_.parallelize(1 to 10) should haveSize(10))
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(Seq.empty[Int]) should haveSize(1))
}
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(Seq(1)) should haveSize(0))
}
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(1 to 10) should haveSize(20))
}
// shouldNot cases
runWithContext(_.parallelize(Seq.empty[Int]) shouldNot haveSize(1))
runWithContext(_.parallelize(Seq(1)) shouldNot haveSize(0))
runWithContext(_.parallelize(1 to 10) shouldNot haveSize(100))
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(Seq.empty[Int]) shouldNot haveSize(0))
}
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(Seq(1)) shouldNot haveSize(1))
}
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(1 to 10) shouldNot haveSize(10))
}
}
it should "support equalMapOf" in {
// should cases
val s = Seq("a" -> 1, "b" -> 2, "c" -> 3)
runWithContext { sc =>
sc.parallelize(s) should equalMapOf(s.toMap)
sc.parallelize(Seq.empty[(String, Int)]) should equalMapOf(Map.empty[String, Int])
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(s) should equalMapOf((s :+ "d" -> 4).toMap)
}
}
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(s :+ "d" -> 4) should equalMapOf(s.toMap))
}
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(s) should equalMapOf(s.tail.toMap))
}
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(s.tail) should equalMapOf(s.toMap))
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(s) should equalMapOf(s.toMap + ("a" -> 10))
}
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(s.tail :+ ("a" -> 10)) should equalMapOf(s.toMap)
}
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(s) should equalMapOf(Map.empty[String, Int])
}
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(Seq.empty[(String, Int)]) should equalMapOf(s.toMap)
}
}
// shouldNot cases
runWithContext { sc =>
sc.parallelize(s) shouldNot equalMapOf((s :+ "d" -> 4).toMap)
sc.parallelize(s) shouldNot equalMapOf(Map.empty[String, Int])
sc.parallelize(Seq.empty[(String, Int)]) shouldNot equalMapOf(s.toMap)
}
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(s) shouldNot equalMapOf(s.toMap))
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(Seq.empty[(String, Int)]) shouldNot equalMapOf(Map.empty[String, Int])
}
}
// lambda ser/de
val s2 = Seq("a" -> newTR(1), "b" -> newTR(2))
runWithContext(_.parallelize(s2) should equalMapOf(s2.toMap))
runWithContext(_.parallelize(s2) shouldNot equalMapOf((s2 :+ "c" -> newTR(3)).toMap))
}
it should "support satisfy" in {
// should cases
runWithContext {
_.parallelize(1 to 100) should satisfy[Int](_.sum == 5050)
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(1 to 100) should satisfy[Int](_.sum == 100)
}
}
// shouldNot cases
runWithContext {
_.parallelize(1 to 100) shouldNot satisfy[Int](_.sum == 100)
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(1 to 100) shouldNot satisfy[Int](_.sum == 5050)
}
}
// lambda ser/de
// FIXME: these will fail if TR in predicate is pulled in via closure, not sure if fixable
runWithContext {
_.parallelize(Seq(newTR(1))) should satisfy[TestRecord](_.toList.contains(newTR(1)))
}
runWithContext {
_.parallelize(Seq(newTR(1))) shouldNot satisfy[TestRecord](_.toList.contains(newTR(2)))
}
}
it should "support satisfySingleValue" in {
// should cases
runWithContext {
_.parallelize(Seq(1)) should satisfySingleValue[Int](_ == 1)
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(Seq(1)) should satisfySingleValue[Int](_ == 10)
}
}
a[PipelineExecutionException] should be thrownBy {
runWithContext {
_.parallelize(1 to 10) should satisfySingleValue[Int](_ == 1)
}
}
a[PipelineExecutionException] should be thrownBy {
runWithContext {
_.parallelize(Seq.empty[Int]) should satisfySingleValue[Int](_ == 1)
}
}
// shouldNot cases
runWithContext {
_.parallelize(Seq(10)) shouldNot satisfySingleValue[Int](_ == 1)
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(Seq(1)) shouldNot satisfySingleValue[Int](_ == 1)
}
}
a[PipelineExecutionException] should be thrownBy {
runWithContext {
_.parallelize(1 to 10) shouldNot satisfySingleValue[Int](_ == 1)
}
}
a[PipelineExecutionException] should be thrownBy {
runWithContext {
_.parallelize(Seq.empty[Int]) shouldNot satisfySingleValue[Int](_ == 1)
}
}
// lambda ser/de
// FIXME: these will fail if TR in predicate is pulled in via closure, not sure if fixable
runWithContext {
_.parallelize(Seq(newTR(1))) should satisfySingleValue[TestRecord](_ == newTR(1))
}
runWithContext {
_.parallelize(Seq(newTR(1))) shouldNot satisfySingleValue[TestRecord](_ == newTR(2))
}
}
it should "work when the content does not serialize (even using Externalizer)" in {
runWithContext { ctx =>
import CoderAssertions._
import org.apache.beam.sdk.util.SerializableUtils._
type DNS = DoesNotSerialize[Int]
val v = new DoesNotSerialize("foo", 42)
val coder = CoderMaterializer.beam(ctx, Coder[DNS])
assume(Try(ensureSerializable(v)).isFailure)
assume(Try(ensureSerializable(Externalizer(v))).isFailure)
assume(Try(ensureSerializableByCoder(coder, v, "?")).isSuccess)
v coderShould roundtrip()
coderIsSerializable[DNS]
val coll = ctx.parallelize(List(v))
coll shouldNot beEmpty // just make sure the SCollection can be built
// satisfy and satisfySingleValue will fail because the closure
// can't be serialized by a Coder
// coll should satisfy[DNS](_.head.a == v.a)
// coll should satisfySingleValue[DNS](_.a == v.a)
coll should containInAnyOrder(List(v))
coll should containSingleValue(v)
coll should containValue(v)
}
}
it should "support forAll" in {
// should cases
runWithContext(_.parallelize(1 to 100) should forAll[Int](_ > 0))
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(1 to 100) should forAll[Int](_ > 10))
}
// shouldNot cases
runWithContext(_.parallelize(1 to 100) shouldNot forAll[Int](_ > 10))
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(1 to 100) shouldNot forAll[Int](_ > 0))
}
// lambda ser/de
// FIXME: these will fail if TR in predicate is pulled in via closure, not sure if fixable
runWithContext(_.parallelize(Seq(newTR(1))) should forAll[TestRecord](_ == newTR(1)))
runWithContext(_.parallelize(Seq(newTR(1))) shouldNot forAll[TestRecord](_ == newTR(2)))
}
it should "support tolerance" in {
val xs = Seq(1.4, 1.5, 1.6)
// should cases
runWithContext(_.parallelize(xs) should forAll[Double](_ === 1.5 +- 0.1))
runWithContext(_.parallelize(xs) should exist[Double](_ === 1.5 +- 0.1))
runWithContext {
_.parallelize(xs) should satisfy[Double](_.sum === 5.0 +- 0.5)
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(xs) should forAll[Double](_ === 1.4 +- 0.1)
}
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(xs) should exist[Double](_ === 1.0 +- 0.1)
}
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(xs) should satisfy[Double](_.sum === 1.0 +- 0.5)
}
}
// shouldNot cases
runWithContext {
_.parallelize(xs) shouldNot forAll[Double](_ === 1.4 +- 0.1)
}
runWithContext {
_.parallelize(xs) shouldNot exist[Double](_ === 1.0 +- 0.1)
}
runWithContext {
_.parallelize(xs) shouldNot satisfy[Double](_.sum === 1.0 +- 0.5)
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(xs) shouldNot forAll[Double](_ === 1.5 +- 0.1)
}
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(xs) shouldNot exist[Double](_ === 1.5 +- 0.1)
}
}
an[AssertionError] should be thrownBy {
runWithContext {
_.parallelize(xs) shouldNot satisfy[Double](_.sum === 5.0 +- 0.5)
}
}
}
it should "support exist" in {
// should cases
runWithContext(_.parallelize(1 to 100) should exist[Int](_ > 99))
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(1 to 100) should exist[Int](_ > 100))
}
// shouldNot cases
runWithContext(_.parallelize(1 to 100) shouldNot exist[Int](_ > 100))
an[AssertionError] should be thrownBy {
runWithContext(_.parallelize(1 to 100) shouldNot exist[Int](_ > 99))
}
// lambda ser/de
// FIXME: these will fail if TR in predicate is pulled in via closure, not sure if fixable
runWithContext(_.parallelize(Seq(newTR(1))) should exist[TestRecord](_ == newTR(1)))
runWithContext(_.parallelize(Seq(newTR(1))) shouldNot exist[TestRecord](_ == newTR(2)))
}
it should "support windowing" in {
val allowedLateness = Duration.standardHours(1)
val teamWindowDuration = Duration.standardMinutes(20)
val baseTime = new Instant(0)
def event[A](elem: A, baseTimeOffset: Duration): TimestampedValue[A] =
TimestampedValue.of(elem, baseTime.plus(baseTimeOffset))
val stream = testStreamOf[Int]
// Start at the epoch
.advanceWatermarkTo(baseTime)
// add some elements ahead of the watermark
.addElements(
event(1, Duration.standardSeconds(3)),
event(2, Duration.standardMinutes(1)),
event(3, Duration.standardSeconds(22)),
event(4, Duration.standardSeconds(3))
)
// The watermark advances slightly, but not past the end of the window
.advanceWatermarkTo(baseTime.plus(Duration.standardMinutes(3)))
.addElements(event(1, Duration.standardMinutes(4)), event(2, Duration.standardSeconds(270)))
runWithContext { sc =>
val windowedStream = sc
.testStream(stream.advanceWatermarkToInfinity())
.withFixedWindows(
teamWindowDuration,
options = WindowOptions(
trigger = Repeatedly
.forever(
AfterWatermark
.pastEndOfWindow()
.withEarlyFirings(
AfterProcessingTime
.pastFirstElementInPane()
.plusDelayOf(Duration.standardMinutes(5))
)
.withLateFirings(
AfterProcessingTime
.pastFirstElementInPane()
.plusDelayOf(Duration.standardMinutes(10))
)
),
accumulationMode = ACCUMULATING_FIRED_PANES,
allowedLateness = allowedLateness
)
)
.withTimestamp
val window = new IntervalWindow(baseTime, teamWindowDuration)
windowedStream.groupByKey.keys should inOnTimePane(window) {
containInAnyOrder(Seq(1, 2, 3, 4))
}
windowedStream.groupByKey should inFinalPane(window) {
haveSize(4)
}
windowedStream.map(_._1).sum should inOnlyPane(window) {
containSingleValue(13)
}
a[ClassCastException] should be thrownBy {
windowedStream.groupByKey should inEarlyGlobalWindowPanes {
haveSize(4)
}
}
windowedStream.groupByKey should inWindow(window) {
forAll[(Int, Iterable[Instant])] { case (_, seq) =>
seq.nonEmpty
}
}
}
runWithContext { sc =>
val windowedStream = sc
.testStream(
stream
.advanceProcessingTime(Duration.standardMinutes(21))
.advanceWatermarkToInfinity()
)
.withGlobalWindow(
options = WindowOptions(
trigger = Repeatedly.forever(
AfterWatermark
.pastEndOfWindow()
.withEarlyFirings(
AfterProcessingTime
.pastFirstElementInPane()
.plusDelayOf(Duration.standardMinutes(5))
)
),
accumulationMode = ACCUMULATING_FIRED_PANES,
allowedLateness = allowedLateness
)
)
windowedStream.sum should inEarlyGlobalWindowPanes {
containInAnyOrder(Iterable(13))
}
windowedStream.sum shouldNot inEarlyGlobalWindowPanes {
beEmpty
}
}
}
it should "support late pane windowing" in {
val baseTime = new Instant(0)
val windowDuration = Duration.standardMinutes(10)
val allowedLateness = Duration.standardMinutes(5)
val stream = testStreamOf[Int]
// Start at the epoch
.advanceWatermarkTo(baseTime)
// On-time element
.addElements(
TimestampedValue.of(1, baseTime.plus(Duration.standardMinutes(1)))
)
// Advance watermark to the end of window
.advanceWatermarkTo(baseTime.plus(windowDuration))
.addElements(
// Late element in allowed lateness
TimestampedValue.of(2, baseTime.plus(Duration.standardMinutes(9)))
)
.advanceWatermarkToInfinity()
runWithContext { sc =>
val windowedStream = sc
.testStream(stream)
.withFixedWindows(
windowDuration,
options = WindowOptions(
trigger = AfterWatermark
.pastEndOfWindow()
.withLateFirings(
AfterProcessingTime.pastFirstElementInPane()
),
accumulationMode = ACCUMULATING_FIRED_PANES,
allowedLateness = allowedLateness
)
)
.sum
val window = new IntervalWindow(baseTime, windowDuration)
windowedStream should inOnTimePane(window) {
containSingleValue(1)
}
windowedStream should inLatePane(window) {
containSingleValue(1 + 2)
}
}
}
it should "customize equality" in {
val in = 1 to 10
val out = 2 to 11
runWithContext {
_.parallelize(in) shouldNot containInAnyOrder(out)
}
runWithContext {
implicit val eqW = Eqv.always
_.parallelize(in) should containInAnyOrder(out)
}
runWithContext {
_.parallelize(List(1)) shouldNot containSingleValue(2)
}
runWithContext {
implicit val eqW = Eqv.always
_.parallelize(List(1)) should containSingleValue(2)
}
runWithContext {
_.parallelize(List(1, 3, 4, 5)) shouldNot containValue(2)
}
runWithContext {
implicit val eqW = Eqv.always
_.parallelize(List(1, 3, 4, 5)) should containValue(2)
}
}
}
object Eqv {
def always: Eq[Int] =
new Eq[Int] {
def eqv(x: Int, y: Int): Boolean =
true
}
}
|
spotify/scio
|
scio-test/src/test/scala/com/spotify/scio/testing/SCollectionMatchersTest.scala
|
Scala
|
apache-2.0
| 22,815 |
package com.verisign.hadoopio.integration
import java.io.File
import java.nio.file.Paths
import com.verisign.hadoopio.logging.LazyLogging
import com.verisign.hadoopio.testing.Testing
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, FileUtil, Path}
import org.apache.hadoop.hdfs.{DFSConfigKeys, HdfsConfiguration, MiniDFSCluster}
import org.scalatest._
/**
* A base test spec for tests that need to talk to HDFS.
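*
* Illustrative use (subclass name assumed): extending this spec, e.g. `class MyHdfsSpec extends HdfsSpec`,
* provides an implicit in-memory `fs: FileSystem` for the duration of the suite.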
*/
@DoNotDiscover
class HdfsSpec extends FunSpec with Matchers with GivenWhenThen with BeforeAndAfterAll with LazyLogging {
private val MiniDfsBaseDir = Paths.get(Testing.TempDirectory, "hdfs").toString
private val NameNodeEditsDir = Paths.get(Testing.TempDirectory, "nn-edits").toString
private val DataNodeDataDir = Paths.get(Testing.TempDirectory, "dn-data").toString
protected val clusterConfiguration: Configuration = {
val conf = new HdfsConfiguration
logger.debug(s"Using local directory $MiniDfsBaseDir as HDFS base directory for MiniDFSCluster")
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, MiniDfsBaseDir)
logger.debug(s"Using local directory $NameNodeEditsDir as edits directory for NameNode")
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, NameNodeEditsDir)
logger.debug(s"Using local directory $DataNodeDataDir as data directory for DataNode")
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, DataNodeDataDir)
conf
}
protected var dfsCluster: MiniDFSCluster = null
implicit protected var fs: FileSystem = null
override def beforeAll() {
if (dfsCluster == null) {
dfsCluster = new MiniDFSCluster.Builder(clusterConfiguration).numDataNodes(2).build()
dfsCluster.waitClusterUp()
dfsCluster.waitActive()
fs = dfsCluster.getFileSystem
}
}
override def afterAll() {
if (dfsCluster != null) {
dfsCluster.shutdown()
dfsCluster = null
fs = null
FileUtil.fullyDelete(new File(Testing.TempDirectory))
()
}
}
describe("HdfsSpec") {
it("should provide access to an in-memory HDFS cluster") {
Testing.createPath(new Path("/hdfs/foo"))
}
}
}
|
verisign/hadoopio
|
src/test/scala/com/verisign/hadoopio/integration/HdfsSpec.scala
|
Scala
|
apache-2.0
| 2,149 |
package scala.scalanative
package util
import language.implicitConversions
class ScopedVar[A] {
import ScopedVar.Assignment
private var init = false
private var value: A = _
def get: A = if (!init) throw ScopedVar.Unitialized() else value
def :=(newValue: A): Assignment[A] = new Assignment(this, newValue)
}
object ScopedVar {
case class Unitialized() extends Exception
class Assignment[T](scVar: ScopedVar[T], value: T) {
private[ScopedVar] def push(): AssignmentStackElement[T] = {
val stack = new AssignmentStackElement(scVar, scVar.init, scVar.value)
scVar.init = true
scVar.value = value
stack
}
}
private class AssignmentStackElement[T](scVar: ScopedVar[T],
oldInit: Boolean,
oldValue: T) {
private[ScopedVar] def pop(): Unit = {
scVar.init = oldInit
scVar.value = oldValue
}
}
implicit def toValue[T](scVar: ScopedVar[T]): T = scVar.get
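/**
* Evaluates `body` with the given assignments in effect and restores the previous values
* afterwards, even if `body` throws. Illustrative use:
*
* {{{
*   val depth = new ScopedVar[Int]
*   ScopedVar.scoped(depth := 1) {
*     depth.get // 1 inside this block; reading outside any scope throws Unitialized
*   }
* }}}
*/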
def scoped[T](ass: Assignment[_]*)(body: => T): T = {
val stack = ass.map(_.push())
try body
finally stack.reverse.foreach(_.pop())
}
}
|
cedricviaccoz/scala-native
|
util/src/main/scala/scala/scalanative/util/ScopedVar.scala
|
Scala
|
bsd-3-clause
| 1,192 |
/*
* Copyright (c) 2014 - 2015 Contributor. All rights reserved.
*/
package org.scalaide.debug.internal.expression
import scala.reflect.runtime.universe
import scala.runtime.RichBoolean
import scala.runtime.RichByte
import scala.runtime.RichChar
import scala.runtime.RichDouble
import scala.runtime.RichFloat
import scala.runtime.RichInt
import scala.runtime.RichLong
import scala.runtime.RichShort
import org.scalaide.debug.internal.expression.context.JdiContext
import org.scalaide.debug.internal.expression.proxies.JdiProxy
import org.scalaide.debug.internal.expression.proxies.primitives.BooleanJdiProxy
import org.scalaide.debug.internal.expression.proxies.primitives.ByteJdiProxy
import org.scalaide.debug.internal.expression.proxies.primitives.CharJdiProxy
import org.scalaide.debug.internal.expression.proxies.primitives.DoubleJdiProxy
import org.scalaide.debug.internal.expression.proxies.primitives.FloatJdiProxy
import org.scalaide.debug.internal.expression.proxies.primitives.IntJdiProxy
import org.scalaide.debug.internal.expression.proxies.primitives.LongJdiProxy
import org.scalaide.debug.internal.expression.proxies.primitives.ShortJdiProxy
/**
* Names of types and methods used in expression evaluator.
*/
object Names {
/**
* Java names.
*/
object Java {
/**
* Names of Java unboxed types, used in multiple places.
*/
object primitives {
val boolean = "boolean"
val int = "int"
val double = "double"
val float = "float"
val char = "char"
val long = "long"
val byte = "byte"
val short = "short"
val void = "void"
/** Regex for matching Java arrays */
val Array = """(.+)\\[\\]""".r
def Array(typeName: String) = typeName + "[]"
}
/**
* Names of Java boxed types, used in multiple places.
*/
object boxed {
val Boolean = classOf[java.lang.Boolean].getName
val Integer = classOf[java.lang.Integer].getName
val Double = classOf[java.lang.Double].getName
val Float = classOf[java.lang.Float].getName
val Character = classOf[java.lang.Character].getName
val Long = classOf[java.lang.Long].getName
val Byte = classOf[java.lang.Byte].getName
val Short = classOf[java.lang.Short].getName
val Void = classOf[java.lang.Void].getName
val all = Set(Integer, Double, Float, Long, Character, Boolean, Byte, Short, Void)
}
val Object = classOf[java.lang.Object].getName
val String = classOf[java.lang.String].getName
}
/**
* Scala names.
*/
object Scala {
val scalaPackageTermName = universe.TermName("scala")
val constructorMethodName = "<init>"
val equalsMethodName = "=="
val notEqualsMethodName = "!="
val emptyType = "<none>"
val wildcardType = "?"
val nothingType = "scala.Nothing"
val simpleNothingType = "Nothing"
val boxedUnitType = classOf[scala.runtime.BoxedUnit].getName
val unitType = "scala.Unit"
val unitLiteral = "()"
val nullType = "scala.Null"
val nullLiteral = "null"
val arrayType = "scala.Array"
val partialFunctionType = classOf[PartialFunction[_, _]].getName
/** Supported methods from `scala.Dynamic` */
val dynamicTraitMethods = Set(
"updateDynamic",
"selectDynamic",
"applyDynamic")
val :: = "scala.collection.immutable.::"
val seq = "scala.collection.Seq"
val rangeInclusive = "scala.collection.immutable.Range$Inclusive"
val range = "scala.collection.immutable.Range"
/**
* Regex for matching Scala arrays.
* Matches both `Array[A]` and `scala.Array[A]` and extracts A to group.
*/
val Array = """(?:scala\\.)?Array\\[(.+)\\]""".r
def Array(typeName: String) = s"$ArrayRoot[$typeName]"
val ArrayRoot = "scala.Array"
val nil = "scala.collection.immutable.Nil"
// strange value that shows up instead of above one
val thisNil = "immutable.this.Nil"
val list = "scala.collection.immutable.List"
// strange value that shows up instead of above one
val thisList = "immutable.this.List"
/**
* Names of Scala unified types, used in multiple places.
*/
object primitives {
val Int = "scala.Int"
val Double = "scala.Double"
val Float = "scala.Float"
val Long = "scala.Long"
val Char = "scala.Char"
val Boolean = "scala.Boolean"
val Byte = "scala.Byte"
val Short = "scala.Short"
val Unit = "scala.Unit"
val all = Set(Int, Double, Float, Long, Char, Boolean, Byte, Short, Unit)
val allShorten = all.map(_.drop("scala.".size))
}
/**
* Scala rich types wrappers.
*/
object rich {
val Boolean = classOf[scala.runtime.RichBoolean].getName
val Byte = classOf[scala.runtime.RichByte].getName
val Char = classOf[scala.runtime.RichChar].getName
val Double = classOf[scala.runtime.RichDouble].getName
val Float = classOf[scala.runtime.RichFloat].getName
val Int = classOf[scala.runtime.RichInt].getName
val Long = classOf[scala.runtime.RichLong].getName
val Short = classOf[scala.runtime.RichShort].getName
val all = Set(Int, Double, Float, Long, Char, Boolean, Byte, Short)
}
}
/**
* Names specific to debugger itself.
*/
object Debugger {
/** Type used to show for custom user-defined lambdas */
val lambdaType = "<custom_lambda>"
val primitiveValueOfProxyMethodName = "__primitiveValue"
/** JdiProxy - in all variants */
val proxyName = classOf[JdiProxy].getSimpleName
val proxyFullName = classOf[JdiProxy].getName
val proxySpecialMethods = Scala.dynamicTraitMethods ++ List("$eq$eq", "$bang$eq", "$plus", "apply", primitiveValueOfProxyMethodName)
val proxyGenericApplyMethodName = "applyWithGenericType"
def ArrayJdiProxy(typeName: String) = s"ArrayJdiProxy[$typeName]"
/** JdiContext in all variants */
val contextName = classOf[JdiContext].getSimpleName
val contextFullName = classOf[JdiContext].getName
/** Name of placeholder method, used in reflective compilation. */
val placeholderName = "placeholder"
/** Name of placeholder method for nested method, used in reflective compilation. */
val placeholderNestedMethodName = "placeholderNestedMethod"
/** Name of placeholder function method, used in reflective compilation. */
val placeholderPartialFunctionName = "placeholderPartialFunction"
/** Name of placeholder partial function method, used in reflective compilation. */
val placeholderFunctionName = "placeholderFunction"
/** Name of placeholder function for obtaining arguments types */
val placeholderArgsName = "placeholderArgs"
/** Name of placeholder function for handling `super` */
val placeholderSuperName = "placeholderSuper"
/** Name of proxy method, used in reflective compilation. */
val proxyMethodName = "proxy"
/** Name of proxy method, used in reflective compilation. */
val valueProxyMethodName = "valueProxy"
/** Name of proxy method, used in reflective compilation. */
val objectOrStaticCallProxyMethodName = "objectOrStaticCallProxy"
/** Name of proxy method, used in reflective compilation. */
val classOfProxyMethodName = "classOfProxy"
/** Name of stringify method, used in reflective compilation. */
val stringifyMethodName = "stringify"
/** Name of `isInstanceOf` method, used in reflective compilation. */
val isInstanceOfMethodName = "isInstanceOfCheck"
/** Name of generateHashCode method, used in reflective compilation. */
val hashCodeMethodName = "generateHashCode"
/** Name of method for setting local variables values, used in reflective compilation. */
val setLocalVariable = "setLocalVariable"
/** Name of context val on top level function for expression. */
val contextParamName = "__context"
/** Name of `this` stub. */
val thisValName = "__this"
/** Name of this proxy method, used in reflective compilation. */
val thisObjectProxyMethodName = "thisObjectProxy"
/** Name of invoke method method. */
val invokeMethodName = "invokeMethod"
/** Name of constructor method */
val newInstance = "newInstance"
val newClassContextName = "newClassContext"
val proxyContextName = "proxyContextParam"
val boxedProxiesNames = Seq(
classOf[BooleanJdiProxy],
classOf[ByteJdiProxy],
classOf[CharJdiProxy],
classOf[DoubleJdiProxy],
classOf[FloatJdiProxy],
classOf[IntJdiProxy],
classOf[LongJdiProxy],
classOf[ShortJdiProxy])
.map(_.getSimpleName)
val newClassName = "CustomFunction"
val objectProxyForFieldMethodName = "objectProxyForField"
}
}
|
andrey-ilinykh/scala-ide
|
org.scala-ide.sdt.debug.expression/src/org/scalaide/debug/internal/expression/Names.scala
|
Scala
|
bsd-3-clause
| 8,812 |
/**
* Copyright (C) 2016 DANS - Data Archiving and Networked Services ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.multideposit.model
object DcType extends Enumeration {
type DcType = Value
// @formatter:off
val COLLECTION: Value = Value("Collection")
val DATASET: Value = Value("Dataset")
val EVENT: Value = Value("Event")
val IMAGE: Value = Value("Image")
val INTERACTIVERESOURCE: Value = Value("InteractiveResource")
val MOVINGIMAGE: Value = Value("MovingImage")
val PHYSICALOBJECT: Value = Value("PhysicalObject")
val SERVICE: Value = Value("Service")
val SOFTWARE: Value = Value("Software")
val SOUND: Value = Value("Sound")
val STILLIMAGE: Value = Value("StillImage")
val TEXT: Value = Value("Text")
// @formatter:on
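/**
* Case-insensitive lookup that also ignores spaces, e.g. valueOf("moving image") yields
* Some(MOVINGIMAGE) while valueOf("unknown") yields None.
*/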
def valueOf(s: String): Option[DcType.Value] = {
DcType.values.find(_.toString equalsIgnoreCase s.replace(" ", ""))
}
}
|
DANS-KNAW/easy-split-multi-deposit
|
src/main/scala/nl.knaw.dans.easy.multideposit/model/DcType.scala
|
Scala
|
apache-2.0
| 1,567 |
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.waz.utils
import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicBoolean
import com.waz.log.BasicLogging.LogTag
import com.waz.log.LogSE._
import com.waz.model.Event
import com.waz.threading.{CancellableFuture, SerialDispatchQueue, Threading}
import scala.collection.mutable
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.reflect.ClassTag
trait EventProcessingQueue[A <: Event] {
protected implicit val evClassTag: ClassTag[A]
protected val selector: A => Boolean = { _ => true }
def enqueue(event: A): Future[Any]
def enqueue(events: Seq[A]): Future[Any]
def enqueueEvent(event: Event): Future[Any] = event match {
case ev: A if selector(ev) => enqueue(ev)
case _ => Future.successful(()) // ignore
}
def enqueueEvents(events: Seq[Event]): Future[Any] = enqueue(events collect { case ev: A if selector(ev) => ev })
}
object EventProcessingQueue {
def apply[A <: Event : ClassTag, B](eventProcessor: A => Future[B]) = {
val classTag = implicitly[ClassTag[A]]
new EventProcessingQueue[A] {
import Threading.Implicits.Background
override protected implicit val evClassTag = classTag
override def enqueue(event: A): Future[Any] = eventProcessor(event)
override def enqueue(events: Seq[A]): Future[Any] = Future.traverse(events)(eventProcessor)
}
}
}
class SerialEventProcessingQueue[A <: Event](processor: Seq[A] => Future[Any], name: String = "")(implicit val evClassTag: ClassTag[A]) extends SerialProcessingQueue[A](processor, name) with EventProcessingQueue[A]
class GroupedEventProcessingQueue[A <: Event, Key](groupBy: A => Key, processor: (Key, Seq[A]) => Future[Any], name: String = "")(implicit val evClassTag: ClassTag[A]) extends EventProcessingQueue[A] {
private implicit val dispatcher = new SerialDispatchQueue(name = s"GroupedEventProcessingQueue[${evClassTag.runtimeClass.getSimpleName}]")
private val queues = new mutable.HashMap[Key, SerialProcessingQueue[A]]
private def queue(key: Key) = queues.getOrElseUpdate(key, new SerialProcessingQueue[A](processor(key, _), s"${name}_$key"))
override def enqueue(event: A): Future[Any] = Future(queue(groupBy(event))).flatMap(_.enqueue(event))
override def enqueue(events: Seq[A]): Future[Vector[Any]] =
Future.traverse(events.groupBy(groupBy).toVector) { case (key, es) => Future(queue(key)).flatMap(_.enqueue(es)) }
def post[T](k: Key)(task: => Future[T]) = Future {
queue(k).post(task)
} flatMap identity
}
class SerialProcessingQueue[A](processor: Seq[A] => Future[Any], name: String = "") {
private implicit val logTag: LogTag = LogTag(name)
private val queue = new ConcurrentLinkedQueue[A]()
def enqueue(event: A): Future[Any] = {
queue.offer(event)
processQueue()
}
def !(event: A) = enqueue(event)
def enqueue(events: Seq[A]): Future[Any] = if (events.nonEmpty) {
events.foreach(queue.offer)
processQueue()
} else
Future.successful(())
protected def processQueue(): Future[Any] = {
verbose(l"SYNC processQueue")
post(processQueueNow())
}
protected def processQueueNow(): Future[Any] = {
val events = Iterator.continually(queue.poll()).takeWhile(_ != null).toVector
verbose(l"SYNC processQueueNow, events: $events")
if (events.nonEmpty) processor(events).recoverWithLog()
else Future.successful(())
}
// post some task on this queue, effectively blocking all other processing while this task executes
def post[T](f: => Future[T]): Future[T] = Serialized.future(this)(f)
/* just for tests! */
def clear(): Unit = queue.clear()
}
class ThrottledProcessingQueue[A](delay: FiniteDuration, processor: Seq[A] => Future[Any], name: String = "") extends SerialProcessingQueue[A](processor, name) {
private implicit val dispatcher = new SerialDispatchQueue(name = if (name.isEmpty) "ThrottledProcessingQueue_" + hashCode() else name)
private val waiting = new AtomicBoolean(false)
@volatile private var waitFuture: CancellableFuture[Any] = CancellableFuture.successful(())
private var lastDispatched = 0L
private implicit val logTag: LogTag = LogTag(name)
override protected def processQueue(): Future[Any] =
if (waiting.compareAndSet(false, true)) {
post {
val d = math.max(0, lastDispatched - System.currentTimeMillis() + delay.toMillis)
verbose(l"SYNC processQueue, delaying: $d millis")
waitFuture = CancellableFuture.delay(d.millis)
if (!waiting.get()) waitFuture.cancel()(logTag) // to avoid race conditions with `flush`
waitFuture.future.flatMap { _ =>
CancellableFuture.lift(processQueueNow())
} .recover {
case e: Throwable => waiting.set(false)
}
}
} else waitFuture.future
override protected def processQueueNow(): Future[Any] = {
waiting.set(false)
lastDispatched = System.currentTimeMillis()
super.processQueueNow()
}
def flush() = {
waiting.set(false)
waitFuture.cancel()(logTag)
post {
processQueueNow()
}
}
}
|
wireapp/wire-android-sync-engine
|
zmessaging/src/main/scala/com/waz/utils/EventProcessingQueue.scala
|
Scala
|
gpl-3.0
| 5,805 |
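A hypothetical usage sketch for SerialProcessingQueue above, not taken from the Wire codebase: events are buffered and handed to the processor in non-overlapping batches, so the mutable counter is updated serially. It assumes the com.waz.utils and com.waz.log classes shown above are on the classpath; SerialQueueExample, total and "example-queue" are illustrative names only.
import scala.concurrent.Future
import com.waz.utils.SerialProcessingQueue
object SerialQueueExample {
  @volatile private var total = 0
  // The processor receives every event queued since the previous batch completed.
  private val numbers = new SerialProcessingQueue[Int](
    batch => Future.successful { total += batch.sum },
    name = "example-queue")
  def main(args: Array[String]): Unit = {
    numbers ! 1                   // enqueue a single event
    numbers.enqueue(Seq(2, 3, 4)) // enqueue a batch
    // enqueue returns a Future; real callers would flatMap on it rather than fire and forget.
  }
}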
package akka.ainterface.test.arbitrary
import java.util.concurrent.TimeUnit
import org.scalacheck.{Arbitrary, Gen}
import scala.concurrent.duration.FiniteDuration
trait DurationArbitrary {
implicit val arbFiniteDuration: Arbitrary[FiniteDuration] = Arbitrary(genFiniteDuration())
def genFiniteDuration(minMillis: Int = 1, maxMillis: Int = Int.MaxValue): Gen[FiniteDuration] = {
Gen.chooseNum(minMillis, maxMillis).map(FiniteDuration(_, TimeUnit.MILLISECONDS))
}
}
|
ainterface/ainterface
|
ainterface/src/test/scala/akka/ainterface/test/arbitrary/DurationArbitrary.scala
|
Scala
|
apache-2.0
| 477 |
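A hypothetical ScalaCheck property suite built on the DurationArbitrary trait above, not part of the original repository; the property names and the 10..20 ms bounds are illustrative only.
import org.scalacheck.Prop.forAll
import org.scalacheck.Properties
import scala.concurrent.duration.FiniteDuration
import akka.ainterface.test.arbitrary.DurationArbitrary
object FiniteDurationProps extends Properties("FiniteDuration") with DurationArbitrary {
  // Uses the implicit arbFiniteDuration provided by the mixed-in trait.
  property("generated durations are at least 1 ms") = forAll { (d: FiniteDuration) =>
    d.toMillis >= 1
  }
  // Uses the bounded generator directly.
  property("bounded generator stays within range") =
    forAll(genFiniteDuration(minMillis = 10, maxMillis = 20)) { d =>
      d.toMillis >= 10 && d.toMillis <= 20
    }
}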
package scalax.collection.io.json
package imp
import net.liftweb.json._
import scalax.collection.GraphEdge.{EdgeLike, EdgeCompanionBase}
import error.JsonGraphError._, descriptor._, descriptor.Defaults._
// ---------------------------- data structures representing the parse result
protected[json]
sealed abstract class JsonList(jsonValues: List[JValue])
extends Iterable[JValue]
{
override def iterator = jsonValues.iterator
}
protected[json]
sealed abstract class ElemList(elemTypeId: String,
elems: List[JValue])
extends JsonList(elems)
{
def toString(nodeOrEdge: String) =
"Json" + (if (elemTypeId.isEmpty) "Default" + nodeOrEdge else elemTypeId) + "List" +
"(" + mkString(",") + ")"
}
protected[json]
case class NodeList protected[imp] (val nodeTypeId: String,
val nodes: List[JValue])
extends ElemList(nodeTypeId, nodes)
{
override def toString = toString("Node")
}
protected[json]
case class EdgeList protected[imp] (val edgeTypeId: String,
val edges: List[JValue])
extends ElemList(edgeTypeId, edges)
{
override def toString = toString("Edge")
}
object Parser {
// ----------------------------------- parsing JSON text to NodeList/EdgeList
def parse[N,C <: EdgeCompanionBase[EdgeLike]]
(json: String,
descriptor: Descriptor[N]): List[ElemList] =
{
val jsonAST = JsonParser.parse(json)
for (JField(name, values) <- jsonAST
if descriptor.sectionIds contains name) yield
{
def makeList(elemTypeId: String, arr: List[JValue]) =
if (descriptor.sectionIds.isNodes(name))
if (descriptor.nodeDescriptor(elemTypeId).isEmpty)
throw err(InvalidElemTypeId, elemTypeId)
else
NodeList(elemTypeId, arr)
else
if (descriptor.edgeDescriptor(elemTypeId).isEmpty)
throw err(InvalidElemTypeId, elemTypeId)
else
EdgeList(elemTypeId, arr)
var elemTypeId = defaultId
val elemList = values match {
case JObject(obj) =>
if (obj.size != 1) throw err(ObjectSizeNEQ1)
obj.head match {
case JField(elemTypeId, value) =>
value match {
case JArray(arr) => makeList(elemTypeId, arr)
case _ => throw err(NonArray, value.toString, value.getClass.toString)
}
}
case JArray (arr) => makeList(elemTypeId, arr)
case _ => throw err(NonObjArrValue, values.toString, values.getClass.toString)
}
elemList
}
}
}
|
opyate/scala-graph
|
json/src/main/scala/scalax/collection/io/json/imp/Parser.scala
|
Scala
|
bsd-3-clause
| 2,693 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package expr
import com.intellij.lang.ASTNode
import com.intellij.psi.scope.PsiScopeProcessor
import com.intellij.psi.tree.TokenSet
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.psi.{PsiElement, ResolveState}
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.base.ScFieldId
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.{ScBindingPattern, ScCaseClause, ScCaseClauses}
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScParameter
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScDeclaredElementsHolder, ScFunction, ScTypeAlias}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypedDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.impl.{ScalaPsiElementFactory, ScalaPsiManager}
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.TypeParameter
import org.jetbrains.plugins.scala.lang.psi.types.result.{Failure, Success, TypeResult, TypingContext}
import scala.collection.immutable.HashSet
import scala.collection.mutable
/**
* Author: ilyas, alefas
*/
trait ScBlock extends ScExpression with ScDeclarationSequenceHolder with ScImportsHolder {
protected override def innerType(ctx: TypingContext): TypeResult[ScType] = {
if (hasCaseClauses) {
val caseClauses = findChildByClassScala(classOf[ScCaseClauses])
val clauses: Seq[ScCaseClause] = caseClauses.caseClauses
val clausesType = clauses.foldLeft(types.Nothing: ScType)((tp, clause) => Bounds.lub(tp, clause.expr match {
case Some(expr) => expr.getType(TypingContext.empty).getOrNothing
case _ => types.Nothing
}))
getContext match {
case c: ScCatchBlock =>
val manager = ScalaPsiManager.instance(getProject)
val funs = manager.getCachedClasses(getResolveScope, "scala.PartialFunction")
val fun = funs.find(_.isInstanceOf[ScTrait]).getOrElse(return Failure("Cannot find PartialFunction class", Some(this)))
val throwable = manager.getCachedClass(getResolveScope, "java.lang.Throwable").orNull
if (throwable == null) return Failure("Cannot find Throwable class", Some(this))
return Success(ScParameterizedType(ScDesignatorType(fun), Seq(ScDesignatorType(throwable), clausesType)), Some(this))
case _ =>
val et = expectedType(fromUnderscore = false).getOrElse(return Failure("Cannot infer type without expected type", Some(this)))
return et match {
case f@ScFunctionType(_, params) =>
Success(ScFunctionType(clausesType, params.map(_.removeVarianceAbstracts(1)))
(getProject, getResolveScope), Some(this))
case f@ScPartialFunctionType(_, param) =>
Success(ScPartialFunctionType(clausesType, param.removeVarianceAbstracts(1))
(getProject, getResolveScope), Some(this))
case _ =>
Failure("Cannot infer type without expected type of scala.FunctionN or scala.PartialFunction", Some(this))
}
}
}
val inner = lastExpr match {
case None =>
ScalaPsiUtil.fileContext(this) match {
case scalaFile: ScalaFile if scalaFile.isCompiled => Nothing
case _ => Unit
}
case Some(e) =>
val m = new mutable.HashMap[String, ScExistentialArgument]
def existize(t: ScType, visited: HashSet[ScType]): ScType = {
if (visited.contains(t)) return t
val visitedWithT = visited + t
t match {
case ScDesignatorType(p: ScParameter) if p.owner.isInstanceOf[ScFunctionExpr] && p.owner.asInstanceOf[ScFunctionExpr].result == Some(this) =>
val t = existize(p.getType(TypingContext.empty).getOrAny, visitedWithT)
m.put(p.name, new ScExistentialArgument(p.name, Nil, t, t))
new ScTypeVariable(p.name)
case ScDesignatorType(typed: ScBindingPattern) if typed.nameContext.isInstanceOf[ScCaseClause] &&
typed.nameContext.asInstanceOf[ScCaseClause].expr == Some(this) =>
val t = existize(typed.getType(TypingContext.empty).getOrAny, visitedWithT)
m.put(typed.name, new ScExistentialArgument(typed.name, Nil, t, t))
new ScTypeVariable(typed.name)
case ScDesignatorType(des) if PsiTreeUtil.isContextAncestor(this, des, true) => des match {
case obj: ScObject =>
val t = existize(leastClassType(obj), visitedWithT)
m.put(obj.name, new ScExistentialArgument(obj.name, Nil, t, t))
new ScTypeVariable(obj.name)
case clazz: ScTypeDefinition =>
val t = existize(leastClassType(clazz), visitedWithT)
val vars = clazz.typeParameters.map {tp => ScalaPsiManager.typeVariable(tp)}.toList
m.put(clazz.name, new ScExistentialArgument(clazz.name, vars, t, t))
new ScTypeVariable(clazz.name)
case typed: ScTypedDefinition =>
val t = existize(typed.getType(TypingContext.empty).getOrAny, visitedWithT)
m.put(typed.name, new ScExistentialArgument(typed.name, Nil, t, t))
new ScTypeVariable(typed.name)
case _ => t
}
case proj@ScProjectionType(p, elem, s) => ScProjectionType(existize(p, visitedWithT), elem, s)
case ScCompoundType(comps, signatureMap, typesMap) =>
new ScCompoundType(comps.map(existize(_, visitedWithT)), signatureMap.map {
case (s: Signature, tp) =>
def updateTypeParam(tp: TypeParameter): TypeParameter = {
new TypeParameter(tp.name, tp.typeParams.map(updateTypeParam), () => existize(tp.lowerType(), visitedWithT),
() => existize(tp.upperType(), visitedWithT), tp.ptp)
}
val pTypes: List[Seq[() => ScType]] =
s.substitutedTypes.map(_.map(f => () => existize(f(), visitedWithT)))
val tParams: Array[TypeParameter] = if (s.typeParams.length == 0) TypeParameter.EMPTY_ARRAY else s.typeParams.map(updateTypeParam)
val rt: ScType = existize(tp, visitedWithT)
(new Signature(s.name, pTypes, s.paramLength, tParams,
ScSubstitutor.empty, s.namedElement match {
case fun: ScFunction =>
ScFunction.getCompoundCopy(pTypes.map(_.map(_()).toList), tParams.toList, rt, fun)
case b: ScBindingPattern => ScBindingPattern.getCompoundCopy(rt, b)
case f: ScFieldId => ScFieldId.getCompoundCopy(rt, f)
case named => named
}, s.hasRepeatedParam), rt)
}, typesMap.map {
case (s, sign) => (s, sign.updateTypes(existize(_, visitedWithT)))
})
case JavaArrayType(arg) => JavaArrayType(existize(arg, visitedWithT))
case ScParameterizedType(des, typeArgs) =>
ScParameterizedType(existize(des, visitedWithT), typeArgs.map(existize(_, visitedWithT)))
case ex@ScExistentialType(q, wildcards) =>
new ScExistentialType(existize(q, visitedWithT), wildcards.map {
ex => new ScExistentialArgument(ex.name, ex.args, existize(ex.lowerBound, visitedWithT), existize(ex.upperBound, visitedWithT))
})
case _ => t
}
}
val t = existize(e.getType(TypingContext.empty).getOrAny, HashSet.empty)
if (m.size == 0) t else new ScExistentialType(t, m.values.toList).simplify()
}
Success(inner, Some(this))
}
private def leastClassType(t : ScTemplateDefinition): ScType = {
val (holders, aliases): (Seq[ScDeclaredElementsHolder], Seq[ScTypeAlias]) = t.extendsBlock.templateBody match {
case Some(b: ScTemplateBody) =>
// jzaugg: Without these type annotations, a class cast exception occurred above. I'm not entirely sure why.
(b.holders: Seq[ScDeclaredElementsHolder], b.aliases: Seq[ScTypeAlias])
case None => (Seq.empty, Seq.empty)
}
val superTypes = t.extendsBlock.superTypes
if (superTypes.length > 1 || !holders.isEmpty || !aliases.isEmpty) {
ScCompoundType.fromPsi(superTypes, holders.toList, aliases.toList, ScSubstitutor.empty)
} else superTypes(0)
}
def hasCaseClauses: Boolean = false
def isInCatchBlock: Boolean = getContext.isInstanceOf[ScCatchBlock]
def isAnonymousFunction = hasCaseClauses && !isInCatchBlock
def exprs: Seq[ScExpression] = findChildrenByClassScala(classOf[ScExpression]).toSeq
def statements: Seq[ScBlockStatement] = findChildrenByClassScala(classOf[ScBlockStatement]).toSeq
def hasRBrace: Boolean = getNode.getChildren(TokenSet.create(ScalaTokenTypes.tRBRACE)).length == 1
def getRBrace: Option[ASTNode] = getNode.getChildren(TokenSet.create(ScalaTokenTypes.tRBRACE)) match {
case Array(node) => Some(node)
case _ => None
}
def lastExpr = findLastChild(classOf[ScExpression])
def lastStatement = findLastChild(classOf[ScBlockStatement])
def addDefinition(decl: ScMember, before: PsiElement): Boolean = {
getNode.addChild(decl.getNode,before.getNode)
getNode.addChild(ScalaPsiElementFactory.createNewLineNode(getManager), before.getNode)
true
}
override def processDeclarations(processor: PsiScopeProcessor,
state : ResolveState,
lastParent: PsiElement,
place: PsiElement): Boolean =
super[ScDeclarationSequenceHolder].processDeclarations(processor, state, lastParent, place) &&
super[ScImportsHolder].processDeclarations(processor, state, lastParent, place)
def needCheckExpectedType = true
}
object ScBlock {
def unapplySeq(block: ScBlock): Option[Seq[ScBlockStatement]] = Option(block.statements)
}
|
JetBrains/intellij-scala-historical
|
src/org/jetbrains/plugins/scala/lang/psi/api/expr/ScBlock.scala
|
Scala
|
apache-2.0
| 10,172 |
package antonkulaga.projects
import akka.actor._
import akka.http.scaladsl.Http.{IncomingConnection, ServerBinding}
import akka.http.scaladsl.{Http, _}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import scala.concurrent.Future
/**
* Main actor that encapsulates main application logic and starts the server
*/
class MainActor extends Actor with ActorLogging
{
implicit val system = context.system
implicit val materializer = ActorMaterializer()
implicit val executionContext = system.dispatcher
val server: HttpExt = Http(context.system)
var serverSource: Source[IncomingConnection, Future[ServerBinding]] = null
val router = new Router()
override def receive: Receive = {
case AppMessages.Start(config)=>
val (host, port) = (config.getString("app.host"), config.getInt("app.port"))
log.info(s"starting server at $host:$port")
server.bindAndHandle(router.routes, host, port)
case AppMessages.Stop=> onStop()
}
def onStop() = {
log.info("Main actor has been stoped...")
}
override def postStop() = {
onStop()
}
}
|
antonkulaga/personal
|
app/jvm/src/main/scala/antonkulaga/projects/MainActor.scala
|
Scala
|
mpl-2.0
| 1,112 |
package models
import java.util.UUID
import com.vividsolutions.jts.geom.Geometry
import com.trifectalabs.roadquality.v0.models.{PathType, SurfaceType}
case class MiniSegment(id: UUID, trafficRating: Double, surfaceRating: Double, surface: SurfaceType, pathType: PathType, path: String)
case class MiniSegmentToSegment(miniSegmentId: UUID, miniSegmentPolyline: Geometry, segmentId: UUID)
case class MiniSegmentSplit(miniSegmentId: UUID, miniSegmentGeom: Geometry, first: Geometry, firstLength: Double, second: Geometry, secondLength: Double)
|
trifectalabs/roadquality
|
api/app/models/MiniSegment.scala
|
Scala
|
bsd-3-clause
| 546 |
/*
* Copyright (c) 2017 Minkyu Cho
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
package com.github.pitzcarraldo.spring.view.node
import java.util
import javax.servlet.http.{HttpServletRequest, HttpServletResponse}
import org.springframework.web.servlet.view.AbstractTemplateView
import scala.beans.BeanProperty
import scala.collection.JavaConverters._
/**
* @author Minkyu Cho ([email protected])
*/
class NodeView extends AbstractTemplateView {
@BeanProperty
var viewPath: String = _
@BeanProperty
var renderer: NodeViewRenderer = _
override def renderMergedTemplateModel(
model: util.Map[String, AnyRef],
httpRequest: HttpServletRequest,
httpResponse: HttpServletResponse): Unit = {
httpResponse.setContentType(getContentType)
model.remove("springMacroRequestContext")
val writer = httpResponse.getWriter
try {
val viewFilePath = httpRequest.getServletContext.getResource(viewPath).getPath
val template: NodeViewTemplate = new NodeViewTemplate(viewFilePath, model)
val response: util.Map[String, AnyRef] = renderer.render(template)
if (response.containsKey("headers")) {
val headers = response.get("headers").asInstanceOf[util.Map[String, String]].asScala
headers.foreach(header => httpResponse.setHeader(header._1, header._2))
}
writer.append(response.get("body").asInstanceOf[String])
} catch {
case e: Exception =>
throw e
} finally {
writer.flush()
}
}
}
|
Pitzcarraldo/spring-node-view
|
src/main/scala/com/github/pitzcarraldo/spring/view/node/NodeView.scala
|
Scala
|
mit
| 2,653 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.entity
import scala.util.Try
import spray.json.JsString
import spray.json.JsValue
import spray.json.RootJsonFormat
import spray.json.deserializationError
protected[core] class Subject private (private val subject: String) extends AnyVal {
protected[core] def asString = subject // to make explicit that this is a string conversion
protected[entity] def toJson = JsString(subject)
override def toString = subject
}
protected[core] object Subject extends ArgNormalizer[Subject] {
/** Minimum subject length */
protected[core] val MIN_LENGTH = 5
/**
* Creates a Subject from a string.
*
* @param str the subject name, at least 5 characters
* @return Subject instance
* @throws IllegalArgumentException if the argument is undefined
*/
@throws[IllegalArgumentException]
override protected[entity] def factory(str: String): Subject = {
require(str.length >= MIN_LENGTH, s"subject must be at least $MIN_LENGTH characters")
new Subject(str)
}
/**
* Creates a random subject
*
* @return Subject
*/
protected[core] def apply(): Subject = {
Subject("anon-" + rand.alphanumeric.take(27).mkString)
}
override protected[core] implicit val serdes = new RootJsonFormat[Subject] {
def write(s: Subject) = s.toJson
def read(value: JsValue) =
Try {
val JsString(s) = value
Subject(s)
} getOrElse deserializationError("subject malformed")
}
private val rand = new scala.util.Random()
}
|
starpit/openwhisk
|
common/scala/src/main/scala/org/apache/openwhisk/core/entity/Subject.scala
|
Scala
|
apache-2.0
| 2,317 |
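A hypothetical round-trip sketch for Subject above, not part of the OpenWhisk sources. It has to live under org.apache.openwhisk.core because Subject, its serdes and asString are protected[core]; the object name SubjectExample is illustrative only.
package org.apache.openwhisk.core.entity
import spray.json.JsString
object SubjectExample extends App {
  val anon = Subject()                    // "anon-" followed by 27 random alphanumeric characters
  assert(anon.asString.startsWith("anon-"))
  val js = Subject.serdes.write(anon)     // serializes to a JsString
  assert(js == JsString(anon.asString))
  assert(Subject.serdes.read(js) == anon) // value class equality compares the wrapped string
}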
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.compat.java8
package object collectionImpl {
type Accumulator[A] = scala.jdk.AnyAccumulator[A]
val Accumulator = scala.jdk.AnyAccumulator
type IntAccumulator = scala.jdk.IntAccumulator
val IntAccumulator = scala.jdk.IntAccumulator
type LongAccumulator = scala.jdk.LongAccumulator
val LongAccumulator = scala.jdk.LongAccumulator
type DoubleAccumulator = scala.jdk.DoubleAccumulator
val DoubleAccumulator = scala.jdk.DoubleAccumulator
type Stepper[A] = scala.collection.Stepper[A]
val Stepper = scala.collection.Stepper
type AnyStepper[A] = scala.collection.AnyStepper[A]
val AnyStepper = scala.collection.AnyStepper
type IntStepper = scala.collection.IntStepper
val IntStepper = scala.collection.IntStepper
type LongStepper = scala.collection.LongStepper
val LongStepper = scala.collection.LongStepper
type DoubleStepper = scala.collection.DoubleStepper
val DoubleStepper = scala.collection.DoubleStepper
}
|
scala/scala-java8-compat
|
src/main/scala-2.13+/scala/compat/java8/collectionImpl/package.scala
|
Scala
|
apache-2.0
| 1,255 |
/*
* Copyright 1998-2016 Linux.org.ru
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ru.org.linux.util.bbcode
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import ru.org.linux.util.bbcode.Parser.DEFAULT_PARSER
@RunWith(classOf[JUnitRunner])
class UrlWithParamTagSpec extends Specification {
private def parse(text: String): String = {
DEFAULT_PARSER.parseRoot(DEFAULT_PARSER.createRootNode, text).renderXHtml
}
"parse and renderXhtml" should {
"escape quotes and markup" in {
parse("""[url=http://tts.com/"><b>a</b>]usrl[/url]""") must be equalTo
"""<p><a href="http://tts.com/"><b>a</b>">usrl</a></p>"""
}
}
}
|
kloun/lorsource
|
src/test/scala/ru/org/linux/util/bbcode/UrlWithParamTagSpec.scala
|
Scala
|
apache-2.0
| 1,277 |
/*
* ScalaRay - Ray tracer based on pbrt (see http://pbrt.org) written in Scala
* Copyright (C) 2009, 2010, 2011 Jesper de Jong
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.jesperdj.scalaray.vecmath
// Trait for classes that can be intersected with a ray
trait Intersectable[R] {
// Compute closest intersection with the given ray
def intersect(ray: Ray): Option[R]
// Check if there is an intersection with the given ray; override this if a more efficient implementation can be provided
def checkIntersect(ray: Ray): Boolean = intersect(ray).isDefined
}
|
jesperdj/scalaray
|
src/main/scala/org/jesperdj/scalaray/vecmath/Intersectable.scala
|
Scala
|
gpl-3.0
| 1,188 |
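The checkIntersect hook above exists so that implementations can avoid computing a full intersection; as a hypothetical illustration, not part of ScalaRay, the decorator below counts how often either method is called while delegating to a wrapped Intersectable and treating Ray as an opaque type. CountingIntersectable is an illustrative name only.
import org.jesperdj.scalaray.vecmath.{Intersectable, Ray}
// Hypothetical decorator: counts how often an Intersectable is queried.
class CountingIntersectable[R](underlying: Intersectable[R]) extends Intersectable[R] {
  private var tests = 0
  // Number of intersection queries made so far.
  def intersectionTests: Int = tests
  def intersect(ray: Ray): Option[R] = { tests += 1; underlying.intersect(ray) }
  // Delegate to the wrapped object's (possibly cheaper) check rather than computing the full intersection.
  override def checkIntersect(ray: Ray): Boolean = { tests += 1; underlying.checkIntersect(ray) }
}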
object Test extends scala.tools.partest.StubErrorMessageTest {
def codeA = """
package stuberrors
class A[T]
"""
def codeB = """
package stuberrors
class B[T: A](val t: T)
"""
def userCode = """
package stuberrors
// Here we want a stub error not an implicit not found error
class C { println(new B(1)) }
"""
def removeFromClasspath(): Unit = {
removeClasses("stuberrors", List("A"))
}
}
|
scala/scala
|
test/files/run/StubErrorTypeclass.scala
|
Scala
|
apache-2.0
| 439 |
/**
* Copyright (C) 2009-2017 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.event
import java.util.concurrent.TimeoutException
import java.util.concurrent.atomic.AtomicInteger
import akka.actor.ActorSystem.Settings
import akka.actor._
import akka.annotation.{ DoNotInherit, InternalApi }
import akka.dispatch.RequiresMessageQueue
import akka.event.Logging._
import akka.util.ReentrantGuard
import akka.util.Helpers
import akka.{ AkkaException, ConfigurationException }
import scala.annotation.implicitNotFound
import scala.collection.immutable
import scala.concurrent.Await
import scala.language.existentials
import scala.util.control.{ NoStackTrace, NonFatal }
/**
* This trait brings log level handling to the EventStream: it reads the log
* levels for the initial logging (StandardOutLogger) and the loggers & level
* for after-init logging, possibly keeping the StandardOutLogger enabled if
* it is part of the configured loggers. All configured loggers are treated as
* system services and managed by this trait, i.e. subscribed/unsubscribed in
* response to changes of LoggingBus.logLevel.
*/
trait LoggingBus extends ActorEventBus {
type Event >: Logging.LogEvent
type Classifier >: Class[_]
import Logging._
private val guard = new ReentrantGuard
private var loggers = Seq.empty[ActorRef]
@volatile private var _logLevel: LogLevel = _
/**
* Query currently set log level. See object Logging for more information.
*/
def logLevel = _logLevel
/**
* Change log level: default loggers (i.e. from configuration file) are
* subscribed/unsubscribed as necessary so that they listen to all levels
* which are at least as severe as the given one. See object Logging for
* more information.
*
* NOTE: if the StandardOutLogger is configured also as normal logger, it
* will not participate in the automatic management of log level
* subscriptions!
*/
def setLogLevel(level: LogLevel): Unit = guard.withGuard {
val logLvl = _logLevel // saves (2 * AllLogLevels.size - 1) volatile reads (because of the loops below)
for {
l ← AllLogLevels
// subscribe if previously ignored and now requested
if l > logLvl && l <= level
log ← loggers
} subscribe(log, classFor(l))
for {
l ← AllLogLevels
// unsubscribe if previously registered and now ignored
if l <= logLvl && l > level
log ← loggers
} unsubscribe(log, classFor(l))
_logLevel = level
}
private def setUpStdoutLogger(config: Settings) {
val level = levelFor(config.StdoutLogLevel) getOrElse {
// only log initialization errors directly with StandardOutLogger.print
StandardOutLogger.print(Error(new LoggerException, simpleName(this), this.getClass, "unknown akka.stdout-loglevel " + config.StdoutLogLevel))
ErrorLevel
}
AllLogLevels filter (level >= _) foreach (l ⇒ subscribe(StandardOutLogger, classFor(l)))
guard.withGuard {
loggers :+= StandardOutLogger
_logLevel = level
}
}
/**
* Internal Akka use only
*/
private[akka] def startStdoutLogger(config: Settings) {
setUpStdoutLogger(config)
publish(Debug(simpleName(this), this.getClass, "StandardOutLogger started"))
}
/**
* Internal Akka use only
*/
private[akka] def startDefaultLoggers(system: ActorSystemImpl) {
val logName = simpleName(this) + "(" + system + ")"
val level = levelFor(system.settings.LogLevel) getOrElse {
// only log initialization errors directly with StandardOutLogger.print
StandardOutLogger.print(Error(new LoggerException, logName, this.getClass, "unknown akka.loglevel " + system.settings.LogLevel))
ErrorLevel
}
try {
val defaultLoggers = system.settings.Loggers match {
case Nil ⇒ classOf[DefaultLogger].getName :: Nil
case loggers ⇒ loggers
}
val myloggers =
for {
loggerName ← defaultLoggers
if loggerName != StandardOutLogger.getClass.getName
} yield {
system.dynamicAccess.getClassFor[Actor](loggerName).map({
case actorClass ⇒ addLogger(system, actorClass, level, logName)
}).recover({
case e ⇒ throw new ConfigurationException(
"Logger specified in config can't be loaded [" + loggerName +
"] due to [" + e.toString + "]", e)
}).get
}
guard.withGuard {
loggers = myloggers
_logLevel = level
}
try {
if (system.settings.DebugUnhandledMessage)
subscribe(system.systemActorOf(Props(new Actor {
def receive = {
case UnhandledMessage(msg, sender, rcp) ⇒
publish(Debug(rcp.path.toString, rcp.getClass, "unhandled message from " + sender + ": " + msg))
}
}), "UnhandledMessageForwarder"), classOf[UnhandledMessage])
} catch {
case _: InvalidActorNameException ⇒ // ignore if it is already running
}
publish(Debug(logName, this.getClass, "Default Loggers started"))
if (!(defaultLoggers contains StandardOutLogger.getClass.getName)) {
unsubscribe(StandardOutLogger)
}
} catch {
case e: Exception ⇒
System.err.println("error while starting up loggers")
e.printStackTrace()
throw new ConfigurationException("Could not start logger due to [" + e.toString + "]")
}
}
/**
* Internal Akka use only
*/
private[akka] def stopDefaultLoggers(system: ActorSystem) {
val level = _logLevel // volatile access before reading loggers
if (!(loggers contains StandardOutLogger)) {
setUpStdoutLogger(system.settings)
publish(Debug(simpleName(this), this.getClass, "shutting down: StandardOutLogger started"))
}
for {
logger ← loggers
if logger != StandardOutLogger
} {
// this is very necessary, else you get infinite loop with DeadLetter
unsubscribe(logger)
logger match {
case ref: InternalActorRef ⇒ ref.stop()
case _ ⇒
}
}
publish(Debug(simpleName(this), this.getClass, "all default loggers stopped"))
}
/**
* INTERNAL API
*/
private def addLogger(system: ActorSystemImpl, clazz: Class[_ <: Actor], level: LogLevel, logName: String): ActorRef = {
val name = "log" + LogExt(system).id() + "-" + simpleName(clazz)
val actor = system.systemActorOf(Props(clazz).withDispatcher(system.settings.LoggersDispatcher), name)
implicit def timeout = system.settings.LoggerStartTimeout
import akka.pattern.ask
val response = try Await.result(actor ? InitializeLogger(this), timeout.duration) catch {
case _: TimeoutException ⇒
publish(Warning(logName, this.getClass, "Logger " + name + " did not respond within " + timeout + " to InitializeLogger(bus)"))
"[TIMEOUT]"
}
if (response != LoggerInitialized)
throw new LoggerInitializationException("Logger " + name + " did not respond with LoggerInitialized, sent instead " + response)
AllLogLevels filter (level >= _) foreach (l ⇒ subscribe(actor, classFor(l)))
publish(Debug(logName, this.getClass, "logger " + name + " started"))
actor
}
}
/**
* This trait defines the interface to be provided by a “log source formatting
* rule” as used by [[akka.event.Logging]]’s `apply`/`create` method.
*
* See the companion object for default implementations.
*
* Example:
* {{{
* trait MyType { // as an example
* def name: String
* }
*
* implicit val myLogSourceType: LogSource[MyType] = new LogSource[MyType] {
* def genString(a: MyType) = a.name
* }
*
* class MyClass extends MyType {
* val log = Logging(eventStream, this) // will use "hallo" as logSource
* def name = "hallo"
* }
* }}}
*
* The second variant is used for including the actor system’s address:
* {{{
* trait MyType { // as an example
* def name: String
* }
*
* implicit val myLogSourceType: LogSource[MyType] = new LogSource[MyType] {
* def genString(a: MyType) = a.name
* def genString(a: MyType, s: ActorSystem) = a.name + "," + s
* }
*
* class MyClass extends MyType {
* val sys = ActorSystem("sys")
* val log = Logging(sys, this) // will use "hallo,akka://sys" as logSource
* def name = "hallo"
* }
* }}}
*
* The default implementation of the second variant will just call the first.
*/
@implicitNotFound("Cannot find LogSource for ${T} please see ScalaDoc for LogSource for how to obtain or construct one.") trait LogSource[-T] {
def genString(t: T): String
def genString(t: T, system: ActorSystem): String = genString(t)
def getClazz(t: T): Class[_] = t.getClass
}
/**
* This is a “marker” class which is inserted as originator class into
* [[akka.event.Logging.LogEvent]] when the string representation was supplied
* directly.
*/
class DummyClassForStringSources
/**
* This object holds predefined formatting rules for log sources.
*
* In case an [[akka.actor.ActorSystem]] is provided, the following apply:
* <ul>
* <li>[[akka.actor.Actor]] and [[akka.actor.ActorRef]] will be represented by their absolute physical path</li>
* <li>providing a `String` as source will append "(<system address>)" and use the result</li>
* <li>providing a `Class` will extract its simple name, append "(<system address>)" and use the result</li>
* <li>anything else gives compile error unless implicit [[akka.event.LogSource]] is in scope for it</li>
* </ul>
*
* In case a [[akka.event.LoggingBus]] is provided, the following apply:
* <ul>
* <li>[[akka.actor.Actor]] and [[akka.actor.ActorRef]] will be represented by their absolute physical path</li>
* <li>providing a `String` as source will be used as is</li>
* <li>providing a `Class` will extract its simple name</li>
* <li>anything else gives compile error unless implicit [[akka.event.LogSource]] is in scope for it</li>
* </ul>
*/
object LogSource {
implicit val fromString: LogSource[String] = new LogSource[String] {
def genString(s: String) = s
override def genString(s: String, system: ActorSystem) = s + "(" + system + ")"
override def getClazz(s: String) = classOf[DummyClassForStringSources]
}
implicit val fromActor: LogSource[Actor] = new LogSource[Actor] {
def genString(a: Actor) = fromActorRef.genString(a.self)
override def genString(a: Actor, system: ActorSystem) = fromActorRef.genString(a.self, system)
}
implicit val fromActorRef: LogSource[ActorRef] = new LogSource[ActorRef] {
def genString(a: ActorRef) = a.path.toString
override def genString(a: ActorRef, system: ActorSystem) = try {
a.path.toStringWithAddress(system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress)
} catch {
// it can fail if the ActorSystem (remoting) is not completely started yet
case NonFatal(_) ⇒ a.path.toString
}
}
// this one unfortunately does not work as implicit, because existential types have some weird behavior
val fromClass: LogSource[Class[_]] = new LogSource[Class[_]] {
def genString(c: Class[_]): String = Logging.simpleName(c)
override def genString(c: Class[_], system: ActorSystem): String = genString(c) + "(" + system + ")"
override def getClazz(c: Class[_]): Class[_] = c
}
implicit def fromAnyClass[T]: LogSource[Class[T]] = fromClass.asInstanceOf[LogSource[Class[T]]]
/**
* Convenience converter access: given an implicit `LogSource`, generate the
* string representation and originating class.
*/
def apply[T: LogSource](o: T): (String, Class[_]) = {
val ls = implicitly[LogSource[T]]
(ls.genString(o), ls.getClazz(o))
}
/**
* Convenience converter access: given an implicit `LogSource` and
* [[akka.actor.ActorSystem]], generate the string representation and
* originating class.
*/
def apply[T: LogSource](o: T, system: ActorSystem): (String, Class[_]) = {
val ls = implicitly[LogSource[T]]
(ls.genString(o, system), ls.getClazz(o))
}
/**
* construct string representation for any object according to
* rules above with fallback to its `Class`’s simple name.
*/
def fromAnyRef(o: AnyRef): (String, Class[_]) =
o match {
case c: Class[_] ⇒ apply(c)
case a: Actor ⇒ apply(a)
case a: ActorRef ⇒ apply(a)
case s: String ⇒ apply(s)
case x ⇒ (Logging.simpleName(x), x.getClass)
}
/**
* construct string representation for any object according to
* rules above (including the actor system’s address) with fallback to its
* `Class`’s simple name.
*/
def fromAnyRef(o: AnyRef, system: ActorSystem): (String, Class[_]) =
o match {
case c: Class[_] ⇒ apply(c)
case a: Actor ⇒ apply(a)
case a: ActorRef ⇒ apply(a)
case s: String ⇒ apply(s)
case x ⇒ (Logging.simpleName(x) + "(" + system + ")", x.getClass)
}
}
/**
* Main entry point for Akka logging: log levels and message types (aka
* channels) defined for the main transport medium, the main event bus. The
* recommended use is to obtain an implementation of the Logging trait with
* suitable and efficient methods for generating log events:
*
* <pre><code>
* val log = Logging(<bus>, <source object>)
* ...
* log.info("hello world!")
* </code></pre>
*
* The source object is used in two fashions: its `Class[_]` will be part of
* all log events produced by this logger, plus a string representation is
* generated which may contain per-instance information, see `apply` or `create`
* below.
*
* Loggers are attached to the level-specific channels <code>Error</code>,
* <code>Warning</code>, <code>Info</code> and <code>Debug</code> as
* appropriate for the configured (or set) log level. If you want to implement
* your own, make sure to handle these four event types plus the <code>InitializeLogger</code>
* message which is sent before actually attaching it to the logging bus.
*
* Logging is configured by setting (some of) the following:
*
* <pre><code>
* akka {
* loggers = ["akka.slf4j.Slf4jLogger"] # for example
* loglevel = "INFO" # used when normal logging ("loggers") has been started
* stdout-loglevel = "WARN" # used during application start-up until normal logging is available
* }
* </code></pre>
*/
object Logging {
/**
* Returns a 'safe' getSimpleName for the provided object's Class
* @return the simple name of the given object's Class
*/
def simpleName(obj: AnyRef): String = simpleName(obj.getClass)
/**
* Returns a 'safe' getSimpleName for the provided Class
* @return the simple name of the given Class
*/
def simpleName(clazz: Class[_]): String = {
val n = clazz.getName
val i = n.lastIndexOf('.')
n.substring(i + 1)
}
/**
* Class name representation of a message.
* `ActorSelectionMessage` representation includes class name of
* wrapped message.
*/
def messageClassName(message: Any): String = message match {
case null ⇒ "null"
case ActorSelectionMessage(m, _, _) ⇒ s"ActorSelectionMessage(${m.getClass.getName})"
case m ⇒ m.getClass.getName
}
/**
* INTERNAL API
*/
private[akka] object LogExt extends ExtensionId[LogExt] {
override def createExtension(system: ExtendedActorSystem): LogExt =
new LogExt(system)
}
/**
* INTERNAL API
*/
private[akka] class LogExt(system: ExtendedActorSystem) extends Extension {
private val loggerId = new AtomicInteger
def id() = loggerId.incrementAndGet()
}
/**
* Marker trait for annotating LogLevel, which must be Int after erasure.
*/
final case class LogLevel(asInt: Int) extends AnyVal {
@inline final def >=(other: LogLevel): Boolean = asInt >= other.asInt
@inline final def <=(other: LogLevel): Boolean = asInt <= other.asInt
@inline final def >(other: LogLevel): Boolean = asInt > other.asInt
@inline final def <(other: LogLevel): Boolean = asInt < other.asInt
}
/**
* Log level in numeric form, used when deciding whether a certain log
* statement should generate a log event. Predefined levels are ErrorLevel (1)
* to DebugLevel (4). In case you want to add more levels, loggers need to
* be subscribed to their event bus channels manually.
*/
final val ErrorLevel = LogLevel(1)
final val WarningLevel = LogLevel(2)
final val InfoLevel = LogLevel(3)
final val DebugLevel = LogLevel(4)
/**
* Internal Akka use only
*
* Don't include the OffLevel in the AllLogLevels since we should never subscribe
* to some kind of OffEvent.
*/
private final val OffLevel = LogLevel(Int.MinValue)
/**
* Returns the LogLevel associated with the given string,
* valid inputs are upper or lowercase (not mixed) versions of:
* "error", "warning", "info" and "debug"
*/
def levelFor(s: String): Option[LogLevel] = Helpers.toRootLowerCase(s) match {
case "off" ⇒ Some(OffLevel)
case "error" ⇒ Some(ErrorLevel)
case "warning" ⇒ Some(WarningLevel)
case "info" ⇒ Some(InfoLevel)
case "debug" ⇒ Some(DebugLevel)
case unknown ⇒ None
}
/**
* Returns the LogLevel associated with the given event class.
* Defaults to DebugLevel.
*/
def levelFor(eventClass: Class[_ <: LogEvent]): LogLevel = {
if (classOf[Error].isAssignableFrom(eventClass)) ErrorLevel
else if (classOf[Warning].isAssignableFrom(eventClass)) WarningLevel
else if (classOf[Info].isAssignableFrom(eventClass)) InfoLevel
else if (classOf[Debug].isAssignableFrom(eventClass)) DebugLevel
else DebugLevel
}
/**
* Returns the event class associated with the given LogLevel
*/
def classFor(level: LogLevel): Class[_ <: LogEvent] = level match {
case ErrorLevel ⇒ classOf[Error]
case WarningLevel ⇒ classOf[Warning]
case InfoLevel ⇒ classOf[Info]
case DebugLevel ⇒ classOf[Debug]
}
// these type ascriptions/casts are necessary to avoid CCEs during construction while retaining correct type
val AllLogLevels: immutable.Seq[LogLevel] = Vector(ErrorLevel, WarningLevel, InfoLevel, DebugLevel)
/**
* Obtain LoggingAdapter for the given actor system and source object. This
* will use the system’s event stream and include the system’s address in the
* log source string.
*
* <b>Do not use this if you want to supply a log category string (like
* “com.example.app.whatever”) unaltered,</b> supply `system.eventStream` in this
* case or use
*
* {{{
* Logging(system, this.getClass)
* }}}
*
* The source is used to identify the source of this logging channel and
* must have a corresponding implicit LogSource[T] instance in scope; by
* default these are provided for Class[_], Actor, ActorRef and String types.
* See the companion object of [[akka.event.LogSource]] for details.
*
* You can add your own rules quite easily, see [[akka.event.LogSource]].
*/
def apply[T: LogSource](system: ActorSystem, logSource: T): LoggingAdapter = {
val (str, clazz) = LogSource(logSource, system)
new BusLogging(system.eventStream, str, clazz, system.asInstanceOf[ExtendedActorSystem].logFilter)
}
/**
* Obtain LoggingAdapter with additional "marker" support (which some logging frameworks are able to utilise)
* for the given actor system and source object. This will use the system’s event stream and include the system’s
* address in the log source string.
*
* <b>Do not use this if you want to supply a log category string (like
* “com.example.app.whatever”) unaltered,</b> supply `system.eventStream` in this
* case or use
*
* {{{
* Logging(system, this.getClass)
* }}}
*
* The source is used to identify the source of this logging channel and
* must have a corresponding implicit LogSource[T] instance in scope; by
* default these are provided for Class[_], Actor, ActorRef and String types.
* See the companion object of [[akka.event.LogSource]] for details.
*
* You can add your own rules quite easily, see [[akka.event.LogSource]].
*/
def withMarker[T: LogSource](system: ActorSystem, logSource: T): MarkerLoggingAdapter = {
val (str, clazz) = LogSource(logSource, system)
new MarkerLoggingAdapter(system.eventStream, str, clazz, system.asInstanceOf[ExtendedActorSystem].logFilter)
}
/**
* Obtain LoggingAdapter for the given logging bus and source object.
*
* The source is used to identify the source of this logging channel and
* must have a corresponding implicit LogSource[T] instance in scope; by
* default these are provided for Class[_], Actor, ActorRef and String types.
* See the companion object of [[akka.event.LogSource]] for details.
*
* You can add your own rules quite easily, see [[akka.event.LogSource]].
*
* Note that this `LoggingAdapter` will use the [[akka.event.DefaultLoggingFilter]],
* and not the [[akka.event.LoggingFilter]] configured for the system
* (if different from `DefaultLoggingFilter`).
*/
def apply[T: LogSource](bus: LoggingBus, logSource: T): LoggingAdapter = {
val (str, clazz) = LogSource(logSource)
new BusLogging(bus, str, clazz)
}
/**
* Obtain LoggingAdapter for the given logging bus and source object.
*
* The source is used to identify the source of this logging channel and
* must have a corresponding implicit LogSource[T] instance in scope; by
* default these are provided for Class[_], Actor, ActorRef and String types.
* See the companion object of [[akka.event.LogSource]] for details.
*
* You can add your own rules quite easily, see [[akka.event.LogSource]].
*
* Note that this `LoggingAdapter` will use the [[akka.event.DefaultLoggingFilter]],
* and not the [[akka.event.LoggingFilter]] configured for the system
* (if different from `DefaultLoggingFilter`).
*/
def withMarker[T: LogSource](bus: LoggingBus, logSource: T): MarkerLoggingAdapter = {
val (str, clazz) = LogSource(logSource)
new MarkerLoggingAdapter(bus, str, clazz)
}
/**
* Obtain LoggingAdapter with MDC support for the given actor.
* Don't use it outside its specific Actor as it isn't thread safe
*/
def apply(logSource: Actor): DiagnosticLoggingAdapter = {
val (str, clazz) = LogSource(logSource)
val system = logSource.context.system.asInstanceOf[ExtendedActorSystem]
new BusLogging(system.eventStream, str, clazz, system.logFilter) with DiagnosticLoggingAdapter
}
/**
* Obtain LoggingAdapter with marker and MDC support for the given actor.
* Don't use it outside its specific Actor as it isn't thread safe
*/
def withMarker(logSource: Actor): DiagnosticMarkerBusLoggingAdapter = {
val (str, clazz) = LogSource(logSource)
val system = logSource.context.system.asInstanceOf[ExtendedActorSystem]
new DiagnosticMarkerBusLoggingAdapter(system.eventStream, str, clazz, system.logFilter)
}
/**
* Obtain LoggingAdapter for the given actor system and source object. This
* will use the system’s event stream and include the system’s address in the
* log source string.
*
* <b>Do not use this if you want to supply a log category string (like
* “com.example.app.whatever”) unaltered,</b> supply `system.eventStream` in this
* case or use
*
* {{{
* Logging.getLogger(system, this.getClass());
* }}}
*
* The source is used to identify the source of this logging channel and
* must have a corresponding implicit LogSource[T] instance in scope; by
* default these are provided for Class[_], Actor, ActorRef and String types.
* See the companion object of [[akka.event.LogSource]] for details.
*/
def getLogger(system: ActorSystem, logSource: AnyRef): LoggingAdapter = {
val (str, clazz) = LogSource.fromAnyRef(logSource, system)
new BusLogging(system.eventStream, str, clazz, system.asInstanceOf[ExtendedActorSystem].logFilter)
}
/**
* Obtain LoggingAdapter for the given logging bus and source object.
*
* The source is used to identify the source of this logging channel and
* must have a corresponding implicit LogSource[T] instance in scope; by
* default these are provided for Class[_], Actor, ActorRef and String types.
* See the companion object of [[akka.event.LogSource]] for details.
*
* Note that this `LoggingAdapter` will use the [[akka.event.DefaultLoggingFilter]],
* and not the [[akka.event.LoggingFilter]] configured for the system
* (if different from `DefaultLoggingFilter`).
*/
def getLogger(bus: LoggingBus, logSource: AnyRef): LoggingAdapter = {
val (str, clazz) = LogSource.fromAnyRef(logSource)
new BusLogging(bus, str, clazz)
}
/**
* Obtain LoggingAdapter with MDC support for the given actor.
* Don't use it outside its specific Actor as it isn't thread safe
*/
def getLogger(logSource: Actor): DiagnosticLoggingAdapter = apply(logSource)
/**
* Obtain LoggingAdapter with MDC support for the given actor.
* Don't use it outside its specific Actor as it isn't thread safe
*/
@deprecated("Use AbstractActor instead of UntypedActor.", since = "2.5.0")
def getLogger(logSource: UntypedActor): DiagnosticLoggingAdapter = {
val (str, clazz) = LogSource.fromAnyRef(logSource)
val system = logSource.getContext().system.asInstanceOf[ExtendedActorSystem]
new BusLogging(system.eventStream, str, clazz, system.logFilter) with DiagnosticLoggingAdapter
}
/**
* Artificial exception injected into Error events if no Throwable is
* supplied; used for getting a stack dump of error locations.
*/
class LoggerException extends AkkaException("")
/**
* Exception that wraps a LogEvent.
*/
class LogEventException(val event: LogEvent, cause: Throwable) extends NoStackTrace {
override def getMessage: String = event.toString
override def getCause: Throwable = cause
}
/**
* Base type of LogEvents
*/
sealed trait LogEvent extends NoSerializationVerificationNeeded {
/**
* The thread that created this log event
*/
@transient
val thread: Thread = Thread.currentThread
/**
* When this LogEvent was created according to System.currentTimeMillis
*/
val timestamp: Long = System.currentTimeMillis
/**
* The LogLevel of this LogEvent
*/
def level: LogLevel
/**
* The source of this event
*/
def logSource: String
/**
* The class of the source of this event
*/
def logClass: Class[_]
/**
* The message, may be any object or null.
*/
def message: Any
/**
* Extra values for adding to MDC
*/
def mdc: MDC = emptyMDC
/**
* Java API: Retrieve the contents of the MDC.
*/
def getMDC: java.util.Map[String, Any] = {
import scala.collection.JavaConverters._
mdc.asJava
}
}
object LogEvent {
def apply(level: LogLevel, logSource: String, logClass: Class[_], message: Any): LogEvent = level match {
case ErrorLevel ⇒ Error(logSource, logClass, message)
case WarningLevel ⇒ Warning(logSource, logClass, message)
case InfoLevel ⇒ Info(logSource, logClass, message)
case DebugLevel ⇒ Debug(logSource, logClass, message)
}
def apply(level: LogLevel, logSource: String, logClass: Class[_], message: Any, mdc: MDC): LogEvent = level match {
case ErrorLevel ⇒ Error(logSource, logClass, message, mdc)
case WarningLevel ⇒ Warning(logSource, logClass, message, mdc)
case InfoLevel ⇒ Info(logSource, logClass, message, mdc)
case DebugLevel ⇒ Debug(logSource, logClass, message, mdc)
}
def apply(level: LogLevel, logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker): LogEvent = level match {
case ErrorLevel ⇒ Error(logSource, logClass, message, mdc, marker)
case WarningLevel ⇒ Warning(logSource, logClass, message, mdc, marker)
case InfoLevel ⇒ Info(logSource, logClass, message, mdc, marker)
case DebugLevel ⇒ Debug(logSource, logClass, message, mdc, marker)
}
}
/**
* For ERROR Logging
*/
case class Error(cause: Throwable, logSource: String, logClass: Class[_], message: Any = "") extends LogEvent {
def this(logSource: String, logClass: Class[_], message: Any) = this(Error.NoCause, logSource, logClass, message)
override def level = ErrorLevel
}
class Error2(cause: Throwable, logSource: String, logClass: Class[_], message: Any = "", override val mdc: MDC) extends Error(cause, logSource, logClass, message) {
def this(logSource: String, logClass: Class[_], message: Any, mdc: MDC) = this(Error.NoCause, logSource, logClass, message, mdc)
}
class Error3(cause: Throwable, logSource: String, logClass: Class[_], message: Any, override val mdc: MDC, override val marker: LogMarker)
extends Error2(cause, logSource, logClass, message, mdc) with LogEventWithMarker {
def this(logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker) = this(Error.NoCause, logSource, logClass, message, mdc, marker)
}
object Error {
def apply(logSource: String, logClass: Class[_], message: Any) =
new Error(NoCause, logSource, logClass, message)
def apply(logSource: String, logClass: Class[_], message: Any, marker: LogMarker) =
new Error3(NoCause, logSource, logClass, message, Map.empty, marker)
def apply(cause: Throwable, logSource: String, logClass: Class[_], message: Any, mdc: MDC) =
new Error2(cause, logSource, logClass, message, mdc)
def apply(cause: Throwable, logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker) =
new Error3(cause, logSource, logClass, message, mdc, marker)
def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC) =
new Error2(NoCause, logSource, logClass, message, mdc)
def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker) =
new Error3(NoCause, logSource, logClass, message, mdc, marker)
/** Null Object used for errors without cause Throwable */
object NoCause extends NoStackTrace
}
def noCause = Error.NoCause
/**
* For WARNING Logging
*/
case class Warning(logSource: String, logClass: Class[_], message: Any = "") extends LogEvent {
override def level = WarningLevel
}
class Warning2(logSource: String, logClass: Class[_], message: Any, override val mdc: MDC) extends Warning(logSource, logClass, message)
class Warning3(logSource: String, logClass: Class[_], message: Any, override val mdc: MDC, override val marker: LogMarker)
extends Warning2(logSource, logClass, message, mdc) with LogEventWithMarker
object Warning {
def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC) = new Warning2(logSource, logClass, message, mdc)
def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker) = new Warning3(logSource, logClass, message, mdc, marker)
}
/**
* For INFO Logging
*/
case class Info(logSource: String, logClass: Class[_], message: Any = "") extends LogEvent {
override def level = InfoLevel
}
class Info2(logSource: String, logClass: Class[_], message: Any, override val mdc: MDC) extends Info(logSource, logClass, message)
class Info3(logSource: String, logClass: Class[_], message: Any, override val mdc: MDC, override val marker: LogMarker)
extends Info2(logSource, logClass, message, mdc) with LogEventWithMarker
object Info {
def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC) = new Info2(logSource, logClass, message, mdc)
def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker) = new Info3(logSource, logClass, message, mdc, marker)
}
/**
* For DEBUG Logging
*/
case class Debug(logSource: String, logClass: Class[_], message: Any = "") extends LogEvent {
override def level = DebugLevel
}
class Debug2(logSource: String, logClass: Class[_], message: Any, override val mdc: MDC) extends Debug(logSource, logClass, message)
class Debug3(logSource: String, logClass: Class[_], message: Any, override val mdc: MDC, override val marker: LogMarker)
extends Debug2(logSource, logClass, message, mdc) with LogEventWithMarker
object Debug {
def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC) = new Debug2(logSource, logClass, message, mdc)
def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker) = new Debug3(logSource, logClass, message, mdc, marker)
}
/** INTERNAL API, Marker interface for LogEvents containing Markers, which can be set for example on an slf4j logger */
sealed trait LogEventWithMarker extends LogEvent {
def marker: LogMarker
/** Appends the marker to the Debug/Info/Warning/Error toString representations */
override def toString = {
val s = super.toString
s.substring(0, s.length - 1) + "," + marker + ")"
}
}
/**
* Message which is sent to each default logger (i.e. from configuration file)
* after its creation but before attaching it to the logging bus. The logger
 * actor must handle this message; it can be used, for example, to register for
 * additional channels. When done, the logger must respond with a LoggerInitialized
 * message. This is necessary to ensure that additional subscriptions are in
 * effect when the logging system has finished starting.
*/
final case class InitializeLogger(bus: LoggingBus) extends NoSerializationVerificationNeeded
/**
 * Response message which each logger must send within 1 second after receiving the
 * InitializeLogger request. If initialization takes longer, send the reply
 * as soon as subscriptions are set up.
*/
abstract class LoggerInitialized
case object LoggerInitialized extends LoggerInitialized {
/**
* Java API: get the singleton instance
*/
def getInstance = this
}
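  // Illustrative sketch (not part of the original source): a minimal custom logger actor
  // following the InitializeLogger/LoggerInitialized handshake described above. The class
  // name is hypothetical; DefaultLogger further below is the reference implementation.
  //
  //   class MyEventListener extends Actor {
  //     def receive = {
  //       case InitializeLogger(_)                        => sender() ! LoggerInitialized
  //       case Error(cause, logSource, logClass, message) => // handle error events
  //       case Warning(logSource, logClass, message)      => // handle warnings
  //       case Info(logSource, logClass, message)         => // handle info events
  //       case Debug(logSource, logClass, message)        => // handle debug events
  //     }
  //   }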
/**
* Java API to create a LoggerInitialized message.
*/
// weird return type due to binary compatibility
def loggerInitialized(): LoggerInitialized.type = LoggerInitialized
/**
* LoggerInitializationException is thrown to indicate that there was a problem initializing a logger
* @param msg
*/
class LoggerInitializationException(msg: String) extends AkkaException(msg)
trait StdOutLogger {
import StdOutLogger._
// format: OFF
// FIXME: remove those when we have the chance to break binary compatibility
private val errorFormat = ErrorFormat
private val errorFormatWithoutCause = ErrorFormatWithoutCause
private val warningFormat = WarningFormat
private val infoFormat = InfoFormat
private val debugFormat = DebugFormat
// format: ON
def timestamp(event: LogEvent): String = Helpers.timestamp(event.timestamp)
def print(event: Any): Unit = event match {
case e: Error ⇒ error(e)
case e: Warning ⇒ warning(e)
case e: Info ⇒ info(e)
case e: Debug ⇒ debug(e)
case e ⇒ warning(Warning(simpleName(this), this.getClass, "received unexpected event of class " + e.getClass + ": " + e))
}
def error(event: Error): Unit = event match {
case e: Error3 ⇒ // has marker
val f = if (event.cause == Error.NoCause) ErrorWithoutCauseWithMarkerFormat else ErrorFormatWithMarker
println(f.format(
e.marker.name,
timestamp(event),
event.thread.getName,
event.logSource,
event.message,
stackTraceFor(event.cause)))
case _ ⇒
val f = if (event.cause == Error.NoCause) ErrorFormatWithoutCause else ErrorFormat
println(f.format(
timestamp(event),
event.thread.getName,
event.logSource,
event.message,
stackTraceFor(event.cause)))
}
def warning(event: Warning): Unit = event match {
case e: Warning3 ⇒ // has marker
println(WarningWithMarkerFormat.format(
e.marker.name,
timestamp(event),
event.thread.getName,
event.logSource,
event.message))
case _ ⇒
println(WarningFormat.format(
timestamp(event),
event.thread.getName,
event.logSource,
event.message))
}
def info(event: Info): Unit = event match {
case e: Info3 ⇒ // has marker
println(InfoWithMarkerFormat.format(
e.marker.name,
timestamp(event),
event.thread.getName,
event.logSource,
event.message))
case _ ⇒
println(InfoFormat.format(
timestamp(event),
event.thread.getName,
event.logSource,
event.message))
}
def debug(event: Debug): Unit = event match {
case e: Debug3 ⇒ // has marker
println(DebugWithMarkerFormat.format(
e.marker.name,
timestamp(event),
event.thread.getName,
event.logSource,
event.message))
case _ ⇒
println(DebugFormat.format(
timestamp(event),
event.thread.getName,
event.logSource,
event.message))
}
}
object StdOutLogger {
// format: OFF
private final val ErrorFormat = "[ERROR] [%s] [%s] [%s] %s%s"
private final val ErrorFormatWithMarker = "[ERROR] [%s][%s] [%s] [%s] %s%s"
private final val ErrorFormatWithoutCause = "[ERROR] [%s] [%s] [%s] %s"
private final val ErrorWithoutCauseWithMarkerFormat = "[ERROR] [%s][%s] [%s] [%s] %s"
private final val WarningFormat = "[WARN] [%s] [%s] [%s] %s"
private final val WarningWithMarkerFormat = "[WARN] [%s][%s] [%s] [%s] %s"
private final val InfoFormat = "[INFO] [%s] [%s] [%s] %s"
private final val InfoWithMarkerFormat = "[INFO] [%s][%s] [%s] [%s] %s"
private final val DebugFormat = "[DEBUG] [%s] [%s] [%s] %s"
private final val DebugWithMarkerFormat = "[DEBUG] [%s][%s] [%s] [%s] %s"
// format: ON
}
/**
* Actor-less logging implementation for synchronous logging to standard
* output. This logger is always attached first in order to be able to log
* failures during application start-up, even before normal logging is
* started. Its log level can be defined by configuration setting
* <code>akka.stdout-loglevel</code>.
*/
class StandardOutLogger extends MinimalActorRef with StdOutLogger {
val path: ActorPath = RootActorPath(Address("akka", "all-systems"), "/StandardOutLogger")
def provider: ActorRefProvider = throw new UnsupportedOperationException("StandardOutLogger does not provide")
override val toString = "StandardOutLogger"
override def !(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit =
if (message == null) throw InvalidMessageException("Message is null")
else print(message)
@throws(classOf[java.io.ObjectStreamException])
override protected def writeReplace(): AnyRef = serializedStandardOutLogger
}
private val serializedStandardOutLogger = new SerializedStandardOutLogger
/**
* INTERNAL API
*/
@SerialVersionUID(1L) private[akka] class SerializedStandardOutLogger extends Serializable {
@throws(classOf[java.io.ObjectStreamException])
private def readResolve(): AnyRef = Logging.StandardOutLogger
}
val StandardOutLogger = new StandardOutLogger
/**
* Actor wrapper around the standard output logger. If
* <code>akka.loggers</code> is not set, it defaults to just this
* logger.
*/
class DefaultLogger extends Actor with StdOutLogger with RequiresMessageQueue[LoggerMessageQueueSemantics] {
override def receive: Receive = {
case InitializeLogger(_) ⇒ sender() ! LoggerInitialized
case event: LogEvent ⇒ print(event)
}
}
/**
* Returns the StackTrace for the given Throwable as a String
*/
def stackTraceFor(e: Throwable): String = e match {
case null | Error.NoCause ⇒ ""
case _: NoStackTrace ⇒ s" (${e.getClass.getName}: ${e.getMessage})"
case other ⇒
val sw = new java.io.StringWriter
val pw = new java.io.PrintWriter(sw)
      pw.append('\n')
other.printStackTrace(pw)
sw.toString
}
type MDC = Map[String, Any]
val emptyMDC: MDC = Map()
}
/**
 * Logging wrapper that makes logging nicer and more efficient: it provides template
 * versions which evaluate .toString only if the log level is actually enabled. Typically used
* by obtaining an implementation from the Logging object:
*
* {{{
* val log = Logging(<bus>, <source object>)
* ...
* log.info("hello world!")
* }}}
*
* All log-level methods support simple interpolation templates with up to four
* arguments placed by using <code>{}</code> within the template (first string
* argument):
*
* {{{
* log.error(exception, "Exception while processing {} in state {}", msg, state)
* }}}
*
 * More than four arguments can be supplied by passing an `Array` to the method
 * variant that takes a single template argument.
*/
trait LoggingAdapter {
type MDC = Logging.MDC
def mdc = Logging.emptyMDC
/*
* implement these as precisely as needed/possible: always returning true
* just makes the notify... methods be called every time.
*/
def isErrorEnabled: Boolean
def isWarningEnabled: Boolean
def isInfoEnabled: Boolean
def isDebugEnabled: Boolean
/*
* These actually implement the passing on of the messages to be logged.
* Will not be called if is...Enabled returned false.
*/
protected def notifyError(message: String): Unit
protected def notifyError(cause: Throwable, message: String): Unit
protected def notifyWarning(message: String): Unit
protected def notifyInfo(message: String): Unit
protected def notifyDebug(message: String): Unit
/*
* The rest is just the widening of the API for the user's convenience.
*/
/**
* Log message at error level, including the exception that caused the error.
* @see [[LoggingAdapter]]
*/
def error(cause: Throwable, message: String): Unit = { if (isErrorEnabled) notifyError(cause, message) }
/**
* Message template with 1 replacement argument.
*
* If `arg1` is an `Array` it will be expanded into replacement arguments, which is useful when
* there are more than four arguments.
* @see [[LoggingAdapter]]
*/
def error(cause: Throwable, template: String, arg1: Any): Unit = { if (isErrorEnabled) notifyError(cause, format1(template, arg1)) }
/**
* Message template with 2 replacement arguments.
* @see [[LoggingAdapter]]
*/
def error(cause: Throwable, template: String, arg1: Any, arg2: Any): Unit = { if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2)) }
/**
* Message template with 3 replacement arguments.
* @see [[LoggingAdapter]]
*/
def error(cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2, arg3)) }
/**
* Message template with 4 replacement arguments.
* @see [[LoggingAdapter]]
*/
def error(cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2, arg3, arg4)) }
/**
* Log message at error level, without providing the exception that caused the error.
* @see [[LoggingAdapter]]
*/
def error(message: String): Unit = { if (isErrorEnabled) notifyError(message) }
/**
* Message template with 1 replacement argument.
*
* If `arg1` is an `Array` it will be expanded into replacement arguments, which is useful when
* there are more than four arguments.
* @see [[LoggingAdapter]]
*/
def error(template: String, arg1: Any): Unit = { if (isErrorEnabled) notifyError(format1(template, arg1)) }
/**
* Message template with 2 replacement arguments.
* @see [[LoggingAdapter]]
*/
def error(template: String, arg1: Any, arg2: Any): Unit = { if (isErrorEnabled) notifyError(format(template, arg1, arg2)) }
/**
* Message template with 3 replacement arguments.
* @see [[LoggingAdapter]]
*/
def error(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isErrorEnabled) notifyError(format(template, arg1, arg2, arg3)) }
/**
* Message template with 4 replacement arguments.
* @see [[LoggingAdapter]]
*/
def error(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isErrorEnabled) notifyError(format(template, arg1, arg2, arg3, arg4)) }
/**
* Log message at warning level.
* @see [[LoggingAdapter]]
*/
def warning(message: String): Unit = { if (isWarningEnabled) notifyWarning(message) }
/**
* Message template with 1 replacement argument.
*
* If `arg1` is an `Array` it will be expanded into replacement arguments, which is useful when
* there are more than four arguments.
* @see [[LoggingAdapter]]
*/
def warning(template: String, arg1: Any): Unit = { if (isWarningEnabled) notifyWarning(format1(template, arg1)) }
/**
* Message template with 2 replacement arguments.
* @see [[LoggingAdapter]]
*/
def warning(template: String, arg1: Any, arg2: Any): Unit = { if (isWarningEnabled) notifyWarning(format(template, arg1, arg2)) }
/**
* Message template with 3 replacement arguments.
* @see [[LoggingAdapter]]
*/
def warning(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isWarningEnabled) notifyWarning(format(template, arg1, arg2, arg3)) }
/**
* Message template with 4 replacement arguments.
* @see [[LoggingAdapter]]
*/
def warning(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isWarningEnabled) notifyWarning(format(template, arg1, arg2, arg3, arg4)) }
/**
* Log message at info level.
* @see [[LoggingAdapter]]
*/
  def info(message: String): Unit = { if (isInfoEnabled) notifyInfo(message) }
/**
* Message template with 1 replacement argument.
*
* If `arg1` is an `Array` it will be expanded into replacement arguments, which is useful when
* there are more than four arguments.
* @see [[LoggingAdapter]]
*/
def info(template: String, arg1: Any): Unit = { if (isInfoEnabled) notifyInfo(format1(template, arg1)) }
/**
* Message template with 2 replacement arguments.
* @see [[LoggingAdapter]]
*/
def info(template: String, arg1: Any, arg2: Any): Unit = { if (isInfoEnabled) notifyInfo(format(template, arg1, arg2)) }
/**
* Message template with 3 replacement arguments.
* @see [[LoggingAdapter]]
*/
def info(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isInfoEnabled) notifyInfo(format(template, arg1, arg2, arg3)) }
/**
* Message template with 4 replacement arguments.
* @see [[LoggingAdapter]]
*/
def info(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isInfoEnabled) notifyInfo(format(template, arg1, arg2, arg3, arg4)) }
/**
* Log message at debug level.
* @see [[LoggingAdapter]]
*/
  def debug(message: String): Unit = { if (isDebugEnabled) notifyDebug(message) }
/**
* Message template with 1 replacement argument.
*
* If `arg1` is an `Array` it will be expanded into replacement arguments, which is useful when
* there are more than four arguments.
* @see [[LoggingAdapter]]
*/
def debug(template: String, arg1: Any): Unit = { if (isDebugEnabled) notifyDebug(format1(template, arg1)) }
/**
* Message template with 2 replacement arguments.
* @see [[LoggingAdapter]]
*/
def debug(template: String, arg1: Any, arg2: Any): Unit = { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2)) }
/**
* Message template with 3 replacement arguments.
* @see [[LoggingAdapter]]
*/
def debug(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2, arg3)) }
/**
* Message template with 4 replacement arguments.
* @see [[LoggingAdapter]]
*/
def debug(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2, arg3, arg4)) }
/**
* Log message at the specified log level.
*/
  def log(level: Logging.LogLevel, message: String): Unit = { if (isEnabled(level)) notifyLog(level, message) }
/**
* Message template with 1 replacement argument.
*
* If `arg1` is an `Array` it will be expanded into replacement arguments, which is useful when
* there are more than four arguments.
*/
def log(level: Logging.LogLevel, template: String, arg1: Any): Unit = { if (isEnabled(level)) notifyLog(level, format1(template, arg1)) }
/**
* Message template with 2 replacement arguments.
*/
def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any): Unit = { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2)) }
/**
* Message template with 3 replacement arguments.
*/
def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2, arg3)) }
/**
* Message template with 4 replacement arguments.
*/
def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2, arg3, arg4)) }
/**
* @return true if the specified log level is enabled
*/
final def isEnabled(level: Logging.LogLevel): Boolean = level match {
case Logging.ErrorLevel ⇒ isErrorEnabled
case Logging.WarningLevel ⇒ isWarningEnabled
case Logging.InfoLevel ⇒ isInfoEnabled
case Logging.DebugLevel ⇒ isDebugEnabled
}
final def notifyLog(level: Logging.LogLevel, message: String): Unit = level match {
case Logging.ErrorLevel ⇒ if (isErrorEnabled) notifyError(message)
case Logging.WarningLevel ⇒ if (isWarningEnabled) notifyWarning(message)
case Logging.InfoLevel ⇒ if (isInfoEnabled) notifyInfo(message)
case Logging.DebugLevel ⇒ if (isDebugEnabled) notifyDebug(message)
}
/**
* If `arg` is an `Array` it will be expanded into replacement arguments, which is useful when
* there are more than four arguments.
*/
private def format1(t: String, arg: Any): String = arg match {
case a: Array[_] if !a.getClass.getComponentType.isPrimitive ⇒ format(t, a: _*)
case a: Array[_] ⇒ format(t, (a map (_.asInstanceOf[AnyRef]): _*))
case x ⇒ format(t, x)
}
def format(t: String, arg: Any*): String = {
val sb = new java.lang.StringBuilder(64)
var p = 0
var startIndex = 0
while (p < arg.length) {
val index = t.indexOf("{}", startIndex)
if (index == -1) {
sb.append(t.substring(startIndex, t.length))
.append(" WARNING arguments left: ")
.append(arg.length - p)
p = arg.length
startIndex = t.length
} else {
sb.append(t.substring(startIndex, index))
.append(arg(p))
startIndex = index + 2
p += 1
}
}
sb.append(t.substring(startIndex, t.length)).toString
}
}
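// Illustrative sketch (not part of the original source): demonstrates how the "{}" template
// substitution implemented by LoggingAdapter.format behaves. The object name is hypothetical;
// NoLogging is used only because format is public on every LoggingAdapter instance.
object TemplateFormatDemo {
  def main(args: Array[String]): Unit = {
    println(NoLogging.format("processing {} in state {}", "msg", "idle"))
    // -> processing msg in state idle
    println(NoLogging.format("only one {} placeholder", 1, 2))
    // surplus arguments are reported: -> only one 1 placeholder WARNING arguments left: 1
  }
}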
/**
* Filter of log events that is used by the `LoggingAdapter` before
* publishing log events to the `eventStream`. It can perform
 * fine-grained filtering based on the log source.
*
* Note that the [[EventStream]] will only subscribe `loggers` to the events
* corresponding to the `logLevel` of the `EventStream`. Therefore it is good
* practice that the `LoggingFilter` implementation first filters using the
 * `logLevel` of the `EventStream` before applying more fine-grained filters.
*/
trait LoggingFilter {
def isErrorEnabled(logClass: Class[_], logSource: String): Boolean
def isWarningEnabled(logClass: Class[_], logSource: String): Boolean
def isInfoEnabled(logClass: Class[_], logSource: String): Boolean
def isDebugEnabled(logClass: Class[_], logSource: String): Boolean
}
/**
 * Default [[LoggingFilter]] that uses the logLevel of the `eventStream`, whose
 * initial value is defined in configuration. The logLevel of the `eventStream` can be
 * changed while the system is running.
*/
class DefaultLoggingFilter(logLevel: () ⇒ Logging.LogLevel) extends LoggingFilter {
def this(settings: Settings, eventStream: EventStream) = this(() ⇒ eventStream.logLevel)
import Logging._
def isErrorEnabled(logClass: Class[_], logSource: String) = logLevel() >= ErrorLevel
def isWarningEnabled(logClass: Class[_], logSource: String) = logLevel() >= WarningLevel
def isInfoEnabled(logClass: Class[_], logSource: String) = logLevel() >= InfoLevel
def isDebugEnabled(logClass: Class[_], logSource: String) = logLevel() >= DebugLevel
}
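// Illustrative sketch (not part of the original source): a custom LoggingFilter that first
// honours the eventStream log level, as recommended above, and additionally silences debug
// logging for sources under a (hypothetical) noisy prefix.
class PrefixAwareLoggingFilter(eventStream: EventStream, noisyPrefix: String) extends LoggingFilter {
  import Logging._
  private def enabled(level: LogLevel, logSource: String): Boolean =
    eventStream.logLevel >= level &&
      !(level == DebugLevel && logSource.startsWith(noisyPrefix))
  def isErrorEnabled(logClass: Class[_], logSource: String) = enabled(ErrorLevel, logSource)
  def isWarningEnabled(logClass: Class[_], logSource: String) = enabled(WarningLevel, logSource)
  def isInfoEnabled(logClass: Class[_], logSource: String) = enabled(InfoLevel, logSource)
  def isDebugEnabled(logClass: Class[_], logSource: String) = enabled(DebugLevel, logSource)
}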
/**
* LoggingAdapter extension which adds MDC support.
* Only recommended to be used within Actors as it isn't thread safe.
*/
trait DiagnosticLoggingAdapter extends LoggingAdapter {
import java.{ util ⇒ ju }
import Logging._
import scala.collection.JavaConverters._
private var _mdc = emptyMDC
/**
* Scala API:
* Mapped Diagnostic Context for application defined values
* which can be used in PatternLayout when `akka.event.slf4j.Slf4jLogger` is configured.
* Visit <a href="http://logback.qos.ch/manual/mdc.html">Logback Docs: MDC</a> for more information.
*
* @return A Map containing the MDC values added by the application, or empty Map if no value was added.
*/
override def mdc: MDC = _mdc
/**
* Scala API:
* Sets the values to be added to the MDC (Mapped Diagnostic Context) before the log is appended.
* These values can be used in PatternLayout when `akka.event.slf4j.Slf4jLogger` is configured.
* Visit <a href="http://logback.qos.ch/manual/mdc.html">Logback Docs: MDC</a> for more information.
*/
def mdc(mdc: MDC): Unit = _mdc = if (mdc != null) mdc else emptyMDC
/**
* Java API:
* Mapped Diagnostic Context for application defined values
* which can be used in PatternLayout when `akka.event.slf4j.Slf4jLogger` is configured.
* Visit <a href="http://logback.qos.ch/manual/mdc.html">Logback Docs: MDC</a> for more information.
 * Note that it returns a <b>COPY</b> of the actual MDC values.
* You cannot modify any value by changing the returned Map.
* Code like the following won't have any effect unless you set back the modified Map.
*
* {{{
* Map mdc = log.getMDC();
* mdc.put("key", value);
* // NEEDED
* log.setMDC(mdc);
* }}}
*
* @return A copy of the actual MDC values
*/
def getMDC: ju.Map[String, Any] = mdc.asJava
/**
* Java API:
* Sets the values to be added to the MDC (Mapped Diagnostic Context) before the log is appended.
* These values can be used in PatternLayout when `akka.event.slf4j.Slf4jLogger` is configured.
* Visit <a href="http://logback.qos.ch/manual/mdc.html">Logback Docs: MDC</a> for more information.
*/
def setMDC(jMdc: java.util.Map[String, Any]): Unit = mdc(if (jMdc != null) jMdc.asScala.toMap else emptyMDC)
/**
* Clear all entries in the MDC
*/
def clearMDC(): Unit = mdc(emptyMDC)
}
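// Illustrative usage sketch (not part of the original source), assuming `log` is a
// DiagnosticLoggingAdapter (for example the one provided by akka.actor.DiagnosticActorLogging)
// and "requestId"/"user" are application-chosen keys:
//
//   log.mdc(Map("requestId" -> 42, "user" -> "alice"))
//   try log.info("handling request")
//   finally log.clearMDC()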
/** DO NOT INHERIT: Class is open only for use by akka-slf4j */
@DoNotInherit
class LogMarker(val name: String)
object LogMarker {
  /** The Marker is internally transferred via MDC using this key */
private[akka] final val MDCKey = "marker"
def apply(name: String): LogMarker = new LogMarker(name)
/** Java API */
def create(name: String): LogMarker = apply(name)
def extractFromMDC(mdc: MDC): Option[String] =
mdc.get(MDCKey) match {
case Some(v) ⇒ Some(v.toString)
case None ⇒ None
}
private[akka] final val Security = apply("SECURITY")
}
/**
* [[LoggingAdapter]] extension which adds Marker support.
* Only recommended to be used within Actors as it isn't thread safe.
*/
class MarkerLoggingAdapter(
override val bus: LoggingBus,
override val logSource: String,
override val logClass: Class[_],
loggingFilter: LoggingFilter)
extends BusLogging(bus, logSource, logClass, loggingFilter) {
// TODO when breaking binary compatibility, these marker methods should become baked into LoggingAdapter itself
// For backwards compatibility, and when LoggingAdapter is created without direct
// association to an ActorSystem
def this(bus: LoggingBus, logSource: String, logClass: Class[_]) =
this(bus, logSource, logClass, new DefaultLoggingFilter(() ⇒ bus.logLevel))
/**
* Log message at error level, including the exception that caused the error.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def error(marker: LogMarker, cause: Throwable, message: String): Unit =
if (isErrorEnabled) bus.publish(Error(cause, logSource, logClass, message, mdc, marker))
/**
* Message template with 1 replacement argument.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
*
* If `arg1` is an `Array` it will be expanded into replacement arguments, which is useful when
* there are more than four arguments.
* @see [[LoggingAdapter]]
*/
def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any): Unit =
if (isErrorEnabled) bus.publish(Error(cause, logSource, logClass, format1(template, arg1), mdc, marker))
/**
* Message template with 2 replacement arguments.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any): Unit =
if (isErrorEnabled) bus.publish(Error(cause, logSource, logClass, format(template, arg1, arg2), mdc, marker))
/**
* Message template with 3 replacement arguments.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any): Unit =
if (isErrorEnabled) bus.publish(Error(cause, logSource, logClass, format(template, arg1, arg2, arg3), mdc, marker))
/**
* Message template with 4 replacement arguments.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit =
if (isErrorEnabled) bus.publish(Error(cause, logSource, logClass, format(template, arg1, arg2, arg3, arg4), mdc, marker))
/**
* Log message at error level, without providing the exception that caused the error.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def error(marker: LogMarker, message: String): Unit =
if (isErrorEnabled) bus.publish(Error(logSource, logClass, message, mdc, marker))
/**
* Message template with 1 replacement argument.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
*
* If `arg1` is an `Array` it will be expanded into replacement arguments, which is useful when
* there are more than four arguments.
* @see [[LoggingAdapter]]
*/
def error(marker: LogMarker, template: String, arg1: Any): Unit =
if (isErrorEnabled) bus.publish(Error(logSource, logClass, format1(template, arg1), mdc, marker))
/**
* Message template with 2 replacement arguments.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def error(marker: LogMarker, template: String, arg1: Any, arg2: Any): Unit =
if (isErrorEnabled) bus.publish(Error(logSource, logClass, format(template, arg1, arg2), mdc, marker))
/**
* Message template with 3 replacement arguments.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def error(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any): Unit =
if (isErrorEnabled) bus.publish(Error(logSource, logClass, format(template, arg1, arg2, arg3), mdc, marker))
/**
* Message template with 4 replacement arguments.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def error(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit =
if (isErrorEnabled) bus.publish(Error(logSource, logClass, format(template, arg1, arg2, arg3, arg4), mdc, marker))
/**
* Log message at warning level.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def warning(marker: LogMarker, message: String): Unit =
if (isWarningEnabled) bus.publish(Warning(logSource, logClass, message, mdc, marker))
/**
* Message template with 1 replacement argument.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
*
* If `arg1` is an `Array` it will be expanded into replacement arguments, which is useful when
* there are more than four arguments.
* @see [[LoggingAdapter]]
*/
def warning(marker: LogMarker, template: String, arg1: Any): Unit =
if (isWarningEnabled) bus.publish(Warning(logSource, logClass, format1(template, arg1), mdc, marker))
/**
* Message template with 2 replacement arguments.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def warning(marker: LogMarker, template: String, arg1: Any, arg2: Any): Unit =
if (isWarningEnabled) bus.publish(Warning(logSource, logClass, format(template, arg1, arg2), mdc, marker))
/**
* Message template with 3 replacement arguments.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def warning(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any): Unit =
if (isWarningEnabled) bus.publish(Warning(logSource, logClass, format(template, arg1, arg2, arg3), mdc, marker))
/**
* Message template with 4 replacement arguments.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def warning(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit =
if (isWarningEnabled) bus.publish(Warning(logSource, logClass, format(template, arg1, arg2, arg3, arg4), mdc, marker))
/**
* Log message at info level.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def info(marker: LogMarker, message: String): Unit =
if (isInfoEnabled) bus.publish(Info(logSource, logClass, message, mdc, marker))
/**
* Message template with 1 replacement argument.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
*
* If `arg1` is an `Array` it will be expanded into replacement arguments, which is useful when
* there are more than four arguments.
* @see [[LoggingAdapter]]
*/
def info(marker: LogMarker, template: String, arg1: Any): Unit =
if (isInfoEnabled) bus.publish(Info(logSource, logClass, format1(template, arg1), mdc, marker))
/**
* Message template with 2 replacement arguments.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def info(marker: LogMarker, template: String, arg1: Any, arg2: Any): Unit =
if (isInfoEnabled) bus.publish(Info(logSource, logClass, format(template, arg1, arg2), mdc, marker))
/**
* Message template with 3 replacement arguments.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def info(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any): Unit =
if (isInfoEnabled) bus.publish(Info(logSource, logClass, format(template, arg1, arg2, arg3), mdc, marker))
/**
* Message template with 4 replacement arguments.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def info(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit =
if (isInfoEnabled) bus.publish(Info(logSource, logClass, format(template, arg1, arg2, arg3, arg4), mdc, marker))
/**
* Log message at debug level.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def debug(marker: LogMarker, message: String): Unit =
if (isDebugEnabled) bus.publish(Debug(logSource, logClass, message, mdc, marker))
/**
* Message template with 1 replacement argument.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
*
* If `arg1` is an `Array` it will be expanded into replacement arguments, which is useful when
* there are more than four arguments.
* @see [[LoggingAdapter]]
*/
def debug(marker: LogMarker, template: String, arg1: Any): Unit =
if (isDebugEnabled) bus.publish(Debug(logSource, logClass, format1(template, arg1), mdc, marker))
/**
* Message template with 2 replacement arguments.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def debug(marker: LogMarker, template: String, arg1: Any, arg2: Any): Unit =
if (isDebugEnabled) bus.publish(Debug(logSource, logClass, format(template, arg1, arg2), mdc, marker))
/**
* Message template with 3 replacement arguments.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def debug(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any): Unit =
if (isDebugEnabled) bus.publish(Debug(logSource, logClass, format(template, arg1, arg2, arg3), mdc, marker))
/**
* Message template with 4 replacement arguments.
* The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special".
* @see [[LoggingAdapter]]
*/
def debug(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit =
if (isDebugEnabled) bus.publish(Debug(logSource, logClass, format(template, arg1, arg2, arg3, arg4), mdc, marker))
// Copy of LoggingAdapter.format1 due to binary compatibility restrictions
private def format1(t: String, arg: Any): String = arg match {
case a: Array[_] if !a.getClass.getComponentType.isPrimitive ⇒ format(t, a: _*)
case a: Array[_] ⇒ format(t, (a map (_.asInstanceOf[AnyRef]): _*))
case x ⇒ format(t, x)
}
}
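// Illustrative usage sketch (not part of the original source), assuming `log` is a
// MarkerLoggingAdapter (for example obtained via Logging.withMarker(system, this));
// userName and orderId are hypothetical values:
//
//   log.warning(LogMarker("SECURITY"), "failed login attempt for user {}", userName)
//   log.info(LogMarker("AUDIT"), "order {} accepted", orderId)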
final class DiagnosticMarkerBusLoggingAdapter(
override val bus: LoggingBus,
override val logSource: String,
override val logClass: Class[_],
loggingFilter: LoggingFilter)
extends MarkerLoggingAdapter(bus, logSource, logClass, loggingFilter) with DiagnosticLoggingAdapter
/**
* [[akka.event.LoggingAdapter]] that publishes [[akka.event.Logging.LogEvent]] to event stream.
*/
class BusLogging(val bus: LoggingBus, val logSource: String, val logClass: Class[_], loggingFilter: LoggingFilter)
extends LoggingAdapter {
// For backwards compatibility, and when LoggingAdapter is created without direct
// association to an ActorSystem
def this(bus: LoggingBus, logSource: String, logClass: Class[_]) =
this(bus, logSource, logClass, new DefaultLoggingFilter(() ⇒ bus.logLevel))
import Logging._
def isErrorEnabled = loggingFilter.isErrorEnabled(logClass, logSource)
def isWarningEnabled = loggingFilter.isWarningEnabled(logClass, logSource)
def isInfoEnabled = loggingFilter.isInfoEnabled(logClass, logSource)
def isDebugEnabled = loggingFilter.isDebugEnabled(logClass, logSource)
protected def notifyError(message: String): Unit =
bus.publish(Error(logSource, logClass, message, mdc))
protected def notifyError(cause: Throwable, message: String): Unit =
bus.publish(Error(cause, logSource, logClass, message, mdc))
protected def notifyWarning(message: String): Unit =
bus.publish(Warning(logSource, logClass, message, mdc))
protected def notifyInfo(message: String): Unit =
bus.publish(Info(logSource, logClass, message, mdc))
protected def notifyDebug(message: String): Unit =
bus.publish(Debug(logSource, logClass, message, mdc))
}
/**
* NoLogging is a LoggingAdapter that does absolutely nothing – no logging at all.
*/
object NoLogging extends LoggingAdapter {
/**
* Java API to return the reference to NoLogging
* @return The NoLogging instance
*/
def getInstance = this
final override def isErrorEnabled = false
final override def isWarningEnabled = false
final override def isInfoEnabled = false
final override def isDebugEnabled = false
final protected override def notifyError(message: String): Unit = ()
final protected override def notifyError(cause: Throwable, message: String): Unit = ()
final protected override def notifyWarning(message: String): Unit = ()
final protected override def notifyInfo(message: String): Unit = ()
final protected override def notifyDebug(message: String): Unit = ()
}
/**
 * NoMarkerLogging is a MarkerLoggingAdapter that does absolutely nothing – no logging at all.
*/
object NoMarkerLogging extends MarkerLoggingAdapter(null, "source", classOf[String], null) {
/**
   * Java API to return the reference to NoMarkerLogging
   * @return The NoMarkerLogging instance
*/
def getInstance = this
final override def isErrorEnabled = false
final override def isWarningEnabled = false
final override def isInfoEnabled = false
final override def isDebugEnabled = false
final protected override def notifyError(message: String): Unit = ()
final protected override def notifyError(cause: Throwable, message: String): Unit = ()
final protected override def notifyWarning(message: String): Unit = ()
final protected override def notifyInfo(message: String): Unit = ()
final protected override def notifyDebug(message: String): Unit = ()
final override def error(marker: LogMarker, cause: Throwable, message: String): Unit = ()
final override def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any): Unit = ()
final override def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any): Unit = ()
final override def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = ()
final override def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = ()
final override def error(marker: LogMarker, message: String): Unit = ()
final override def error(marker: LogMarker, template: String, arg1: Any): Unit = ()
final override def error(marker: LogMarker, template: String, arg1: Any, arg2: Any): Unit = ()
final override def error(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = ()
final override def error(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = ()
final override def warning(marker: LogMarker, message: String): Unit = ()
final override def warning(marker: LogMarker, template: String, arg1: Any): Unit = ()
final override def warning(marker: LogMarker, template: String, arg1: Any, arg2: Any): Unit = ()
final override def warning(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = ()
final override def warning(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = ()
  final override def info(marker: LogMarker, message: String): Unit = ()
  final override def info(marker: LogMarker, template: String, arg1: Any): Unit = ()
final override def info(marker: LogMarker, template: String, arg1: Any, arg2: Any): Unit = ()
final override def info(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = ()
final override def info(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = ()
final override def debug(marker: LogMarker, message: String): Unit = ()
final override def debug(marker: LogMarker, template: String, arg1: Any): Unit = ()
final override def debug(marker: LogMarker, template: String, arg1: Any, arg2: Any): Unit = ()
final override def debug(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = ()
final override def debug(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = ()
}
| rorygraves/perf_tester | corpus/akka/akka-actor/src/main/scala/akka/event/Logging.scala | Scala | apache-2.0 | 73,779 |
package adt.bson.mongo.async.client
import adt.bson.mongo.async.MongoSingleCallback
import com.mongodb.async.SingleResultCallback
import play.api.libs.functional.syntax._
import scala.concurrent.{Future, Promise}
/**
* Module for converting [[SingleResultCallback]]s to [[Future]]s.
*/
object MongoAsyncConverters {
/**
* Takes a function that requires a [[MongoSingleCallback]] and provides a callback that fulfills a promise.
*
* @param action a function that requires a callback
* @return a future that is fulfilled by what is passed to the [[MongoSingleCallback]] that is provided to the action
*/
def promise[P](action: SingleResultCallback[P] => Any): Future[P] = {
val p = Promise[P]()
action(MongoSingleCallback.complete(p))
p.future
}
/**
* Same as [[promise]], except that it converts the [[Void]] type into [[Unit]].
*
* @param action a function that requires a void callback
* @return a future that is fulfilled with Unit when the action completes
*/
def promiseUnit(action: SingleResultCallback[Void] => Any): Future[Unit] = {
val p = Promise[Unit]()
action(MongoSingleCallback.complete(p).contramap(_ => ()))
p.future
}
}
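// Illustrative usage sketch (not part of the original file); `collection` stands for a
// hypothetical com.mongodb.async.client.MongoCollection whose async methods take a
// SingleResultCallback as their last parameter:
//
//   val count: Future[java.lang.Long] = MongoAsyncConverters.promise[java.lang.Long](collection.count(_))
//   val dropped: Future[Unit]         = MongoAsyncConverters.promiseUnit(collection.drop(_))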
| jeffmay/bson-adt | bson-adt-mongo3-async/src/main/scala/adt/bson/mongo/async/client/MongoAsyncConverters.scala | Scala | apache-2.0 | 1,209 |
package ml
abstract class Estimator extends Identifiable with Params {
def fit(dataset: Dataset, paramMap: ParamMap): Transformer
def fit(dataset: Dataset, paramMaps: Array[ParamMap]): Array[Transformer] = {
paramMaps.map(fit(dataset, _))
}
/**
* Parameter for the output model.
*/
def model: Params = Params.empty
}
| mengxr/spark-ml | src/main/scala/ml/Estimator.scala | Scala | apache-2.0 | 341 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon.utils
trait IncrementalState {
def push(): Unit
def pop(): Unit
final def pop(lvl: Int): Unit = List.range(0, lvl).foreach(_ => pop())
def clear(): Unit
def reset(): Unit
}
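// Illustrative sketch (not part of the original file): a minimal, hypothetical implementation
// backed by a stack of immutable frames; push snapshots the current frame, pop discards it.
class IncrementalSetSketch[A] extends IncrementalState {
  private var frames: List[Set[A]] = List(Set.empty[A])
  def current: Set[A] = frames.head
  def +=(a: A): Unit = frames = (frames.head + a) :: frames.tail
  def push(): Unit = frames = frames.head :: frames
  def pop(): Unit =
    frames = frames.tail match {
      case Nil  => List(Set.empty[A])
      case rest => rest
    }
  def clear(): Unit = frames = List(Set.empty[A])
  def reset(): Unit = clear()
}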
| regb/leon | src/main/scala/leon/utils/IncrementalState.scala | Scala | gpl-3.0 | 241 |
package de.choffmeister.microserviceutils.auth
import java.time.Instant
import akka.http.scaladsl.model.DateTime
import akka.http.scaladsl.model.headers.{Cookie, HttpCookie, HttpCookiePair}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.{Directive0, Directive1, StandardRoute}
import de.choffmeister.microserviceutils.auth.models._
import scala.concurrent.Future
import scala.concurrent.duration._
final case class AuthCookie[RO](resourceOwner: RO, refreshToken: RefreshToken) {
def valid: Boolean = !refreshToken.revoked && !refreshToken.expiresAt.exists(_.isBefore(Instant.now))
}
trait CookieAuthProvider[RO, C, AT, RT, AC]
extends AuthProvider[RO, C, AT]
with AuthProviderRefreshTokenFlow[RO, C, AT, RT]
with AuthProviderAuthorizationCodeFlow[RO, C, AT, AC] {
val authClientId: String
val authRefreshTokenLifetime: FiniteDuration = 30.days
val cookieNamePrefix: String
def selectAuthCookie(authCookies: List[AuthCookie[RO]]): Directive1[Option[AuthCookie[RO]]]
def handleUnauthorizeResourceOwner: StandardRoute
override def authorizeResourceOwner: Directive1[RO] = {
extractAllAuthCookies.flatMap { authCookies =>
selectAuthCookie(authCookies).flatMap {
case Some(authCookie) if authCookie.valid =>
extractClient.flatMap {
case Right(client) =>
verifyConsentCookie(authCookie.resourceOwner, client).flatMap {
case true => provide(authCookie.resourceOwner)
case false => handleUnauthorizeResourceOwner
}
case Left(error) =>
completeWithAuthError(error)
}
case None =>
handleUnauthorizeResourceOwner
}
}
}
def provideConsentCookie(resourceOwner: RO, client: C): Directive0 = {
setCookie(createConsentCookie(resourceOwner.id, client.id))
}
def verifyConsentCookie(resourceOwner: RO, client: C): Directive1[Boolean] = {
optionalCookie(consentCookieNameTemplate(resourceOwner.id)).flatMap {
case Some(HttpCookiePair(_, value)) if value == client.id =>
deleteCookie(createConsentCookie(resourceOwner.id, client.id)).tflatMap { _ => provide(true) }
case _ =>
provide(false)
}
}
def provideAuthCookie(resourceOwner: RO): Directive0 = {
val now = Instant.ofEpochSecond(Instant.now.getEpochSecond)
val refreshTokenF = createOrUpdateRefreshToken(
refreshToken = None,
expiresAt = Some(now.plusSeconds(authRefreshTokenLifetime.toSeconds)),
scopes = Set.empty,
clientId = authClientId,
resourceOwnerId = resourceOwner.id
)
onSuccess(refreshTokenF).flatMap { refreshToken =>
setCookie(createAuthCookie(resourceOwner.id, Some(refreshToken.refreshToken)))
}
}
def extractAllAuthCookies: Directive1[List[AuthCookie[RO]]] = {
optionalHeaderValueByType(Cookie).flatMap {
case Some(cookieHeader) =>
val authCookies = cookieHeader.cookies
.collect { case HttpCookiePair(authCookieNameRegex(resourceOwnerIdEncoded), refreshToken) =>
cookieNameDecode(resourceOwnerIdEncoded) -> refreshToken
}
val auths = Future
.sequence(authCookies.map { case (resourceOwnerId, refreshTokenStr) =>
for {
resourceOwner <- findResourceOwner(resourceOwnerId)
refreshToken <- findRefreshToken(refreshTokenStr)
} yield (
resourceOwnerId,
resourceOwner.map(Right.apply).getOrElse(Left(resourceOwnerId)),
refreshToken.map(Right.apply).getOrElse(Left(refreshTokenStr))
)
})
.map(_.toList)
onSuccess(auths).flatMap { auths =>
val cookieInstructions = Future.sequence(auths.collect {
case (resourceOwnerId, Left(_), _) =>
Future.successful(
Some(
createAuthCookie(resourceOwnerId, None).withValue(value = "deleted").withExpires(DateTime.MinValue)
)
)
case (resourceOwnerId, _, Left(_)) =>
Future.successful(
Some(
createAuthCookie(resourceOwnerId, None).withValue(value = "deleted").withExpires(DateTime.MinValue)
)
)
case (resourceOwnerId, _, Right(refreshToken)) if refreshToken.clientId != authClientId =>
Future.successful(
Some(
createAuthCookie(resourceOwnerId, None)
.withValue(value = "deleted")
.withExpires(DateTime.MinValue)
)
)
case (_, _, Right(refreshToken)) if refreshToken.revoked =>
Future.successful(None)
case (_, _, Right(refreshToken)) if refreshToken.expiresAt.exists(_.isBefore(Instant.now)) =>
Future.successful(None)
case (_, _, Right(refreshToken)) =>
val updatedRefreshTokenF = createOrUpdateRefreshToken(
refreshToken = Some(refreshToken.refreshToken),
scopes = refreshToken.scopes,
clientId = refreshToken.clientId,
resourceOwnerId = refreshToken.resourceOwnerId,
expiresAt = Some(Instant.now.plusSeconds(authRefreshTokenLifetime.toSeconds))
)
updatedRefreshTokenF.map(_ => None)
})
onSuccess(cookieInstructions).flatMap { cookieInstructions =>
val cookieDirective = cookieInstructions.collect { case Some(c) => c } match {
case Nil => pass
case first :: tail => setCookie(first, tail: _*)
}
cookieDirective.tflatMap { _ =>
provide(auths.collect {
case (_, Right(resourceOwner), Right(refreshToken)) if refreshToken.clientId == authClientId =>
AuthCookie(resourceOwner, refreshToken)
})
}
}
}
case None =>
provide(List.empty)
}
}
private val cookieNameEncodingMap = List("%" -> "%25", "=" -> "%3D", ":" -> "%3A")
private def cookieNameEncode(str: String) =
cookieNameEncodingMap.foldLeft(str) { case (s, (i, o)) =>
s.replaceAll(i, o)
}
private def cookieNameDecode(str: String) =
cookieNameEncodingMap.reverse.foldLeft(str) { case (s, (i, o)) =>
s.replaceAll(o, i)
}
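  // Illustrative sketch (not part of the original file): the encode/decode pair above escapes
  // the characters that would otherwise break the "prefix_owner_suffix" cookie name scheme,
  // e.g. cookieNameEncode("tenant:alice=admin") == "tenant%3Aalice%3Dadmin"
  // and  cookieNameDecode("tenant%3Aalice%3Dadmin") == "tenant:alice=admin".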
private def consentCookieNameTemplate(resourceOwnerId: String) =
s"${cookieNamePrefix}_${cookieNameEncode(resourceOwnerId)}_consent"
private def createConsentCookie(resourceOwnerId: String, clientId: String) =
HttpCookie(
consentCookieNameTemplate(resourceOwnerId),
clientId,
path = Some("/"),
expires = Some(DateTime.now.plus(60000L)),
httpOnly = true
)
private lazy val authCookieNameRegex = s"${cookieNamePrefix}_(.+)_token".r
private def authCookieNameTemplate(resourceOwnerId: String) =
s"${cookieNamePrefix}_${cookieNameEncode(resourceOwnerId)}_token"
private def createAuthCookie(resourceOwnerId: String, refreshToken: Option[String]) =
HttpCookie(
authCookieNameTemplate(resourceOwnerId),
refreshToken.getOrElse(""),
path = Some("/"),
expires = Some(DateTime.MaxValue),
httpOnly = true
)
}
| choffmeister/microservice-utils | microservice-utils-auth/src/main/scala/de/choffmeister/microserviceutils/auth/CookieAuthProvider.scala | Scala | mit | 7,341 |
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import collection.mutable.ListBuffer
import io.Source
import java.io.{File, FileWriter, BufferedWriter}
trait GenRegularTestsBase {
def includeFile(file: File): Boolean
val baseDirPath: String
val name: String
def copyFile(inputFile: File, outputFile: File): File = {
if (!outputFile.exists || inputFile.lastModified > outputFile.lastModified) {
val writer = new BufferedWriter(new FileWriter(outputFile))
try {
val inputLines = Source.fromFile(inputFile).getLines().toList // for 2.8
for (line <- inputLines) {
writer.write(line.toString)
writer.newLine() // add for 2.8
}
}
finally {
writer.flush()
writer.close()
println("Generated " + outputFile.getAbsolutePath)
}
}
outputFile
}
def copyFile(targetBaseDir: File, filePath: String): File = {
val source = new File(filePath)
val targetDir = new File(targetBaseDir, source.getPath.substring("scalatest-test/src/test/scala/".length, source.getPath.lastIndexOf("/")))
targetDir.mkdirs()
val target = new File(targetDir, source.getName)
copyFile(source, target)
}
def processDir(dir: File, targetDir: File): Seq[File] = {
targetDir.mkdirs()
val (sourceFiles, subDirs) = dir.listFiles.partition(_.isFile)
sourceFiles.flatMap { sourceFile =>
val sourceFileName = sourceFile.getName
if (sourceFileName.endsWith(".scala") && includeFile(sourceFile)) {
val outputFile = new File(targetDir, sourceFileName)
Seq(copyFile(new File(dir, sourceFileName), outputFile))
}
else
Seq.empty[File]
} ++
subDirs.flatMap { subDir =>
processDir(subDir, new File(targetDir, subDir.getName))
}
}
def processJavaDir(dir: File, targetDir: File): Seq[File] = {
targetDir.mkdirs()
val (sourceFiles, subDirs) = dir.listFiles.partition(_.isFile)
sourceFiles.flatMap { sourceFile =>
val sourceFileName = sourceFile.getName
if (sourceFileName.endsWith(".java")) {
val sourceFile = new File(dir, sourceFileName)
val outputFile = new File(targetDir, sourceFileName)
if (!outputFile.exists || sourceFile.lastModified > outputFile.lastModified)
copyFile(sourceFile, outputFile)
Seq(outputFile)
}
else
Seq.empty[File]
} ++
subDirs.flatMap { subDir =>
processJavaDir(subDir, new File(targetDir, subDir.getName))
}
}
def copyJavaDir(targetBaseDir: File): Seq[File] = {
processJavaDir(new File("scalatest-test/src/test/java"), targetBaseDir)
}
def genTest(targetBaseDir: File, version: String, scalaVersion: String): Seq[File] = {
val sourceBaseDir = new File(baseDirPath)
val regularDir = new File(targetBaseDir, "/org/scalatest/")
processDir(sourceBaseDir, regularDir)
}
}
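// Illustrative sketch (not part of the original file): a hypothetical concrete generator that
// copies only *Spec.scala test sources; genTest above then mirrors them under the target dir.
object GenExampleTests extends GenRegularTestsBase {
  val baseDirPath: String = "scalatest-test/src/test/scala/org/scalatest"
  val name: String = "genExampleTests"
  def includeFile(file: File): Boolean = file.getName.endsWith("Spec.scala")
}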
| dotty-staging/scalatest | project/GenRegularTestBase.scala | Scala | apache-2.0 | 3,440 |
package uk.co.sprily.dh
package scheduling
import scala.concurrent.duration._
case class Take(schedule: Schedule, n: Long) extends Schedule {
case class Target(
val initiateAt: Deadline,
val timeoutAt: Deadline,
val numLeft: Long,
val underlying: schedule.Target) extends TargetLike
object Target {
def apply(numLeft: Long)(underlying: schedule.Target): Target = Target(
initiateAt = underlying.initiateAt,
timeoutAt = underlying.timeoutAt,
numLeft = numLeft,
underlying = underlying)
}
override def startAt(now: Instant) = {
val underlying = schedule.startAt(now)
Target(n-1)(underlying)
}
override def completedAt(previous: Target, now: Instant) = {
previous.numLeft match {
case numLeft if numLeft > 0 =>
schedule.completedAt(previous.underlying, now)
.map(Target.apply(numLeft-1) _)
case _ => None
}
}
override def timedOutAt(previous: Target, now: Instant) = {
previous.numLeft match {
case numLeft if numLeft > 0 =>
schedule.timedOutAt(previous.underlying, now)
.map(Target.apply(numLeft-1) _)
case _ => None
}
}
}
| sprily/datahopper | scheduling/src/main/scala/take.scala | Scala | gpl-3.0 | 1,238 |
package com.soundcloud.spdt
import play.api.libs.json._
object Feature {
trait Type
object Boolean extends Type
object RealValued extends Type
}
//// sample
////
// @param features: analogous to a sparse vector for sample features
// @param label: optional class label of the sample
case class Sample(features: Map[Int,Double], label: Option[Int]) {
import Sample.sampleWriter
lazy val json = Json.stringify(Json.toJson(this))
}
object Sample {
implicit val sampleReader: Reads[Sample] = new Reads[Sample] {
def reads(json: JsValue): JsResult[Sample] = {
      val keys: Seq[Int] = (json \ "features").as[Seq[Int]]
      val values: Seq[Double] = (json \ "values").as[Seq[Double]]
      val label: Option[Int] = (json \ "label").asOpt[Int]
keys.length == values.length match {
case true => JsSuccess(Sample(keys.zip(values).toMap, label))
case false => JsError(Seq(JsPath() -> Seq(JsonValidationError("validate.error.expected.sparse.features"))))
}
}
}
implicit val sampleWriter: Writes[Sample] = new Writes[Sample] {
def writes(sample: Sample): JsValue = {
Json.obj(
"features" -> sample.features.keys,
"values" -> sample.features.values,
"label" -> sample.label)
}
}
def listFromJson(json: String): List[Sample] =
    (Json.parse(json) \ "samples").as[Seq[JsValue]].map(fromJson).toList
def fromJson(s: String): Sample = fromJson(Json.parse(s))
def fromJson(json: JsValue): Sample = json.as[Sample]
def jsonList(samples: List[Sample]) =
"""{"samples":[%s]}""".format(samples.map(_.json).mkString(","))
}
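// Illustrative sketch (not part of the original file): the sparse JSON shape produced and
// consumed above uses parallel "features"/"values" arrays (which must have equal length)
// plus an optional "label", e.g.
//   {"features":[1,7],"values":[0.5,1.0],"label":0}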
| soundcloud/spdt | compute/src/main/scala/com.soundcloud.spdt/Sample.scala | Scala | mit | 1,633 |
package me.hawkweisman.alexandria
package controllers
import me.hawkweisman.util.RichException.makeRich
import me.hawkweisman.util.concurrent.tryToFuture
import responses.{ ModelResponseMessage, ErrorModel, BookSerializer, AuthorSerializer }
import model.Tables._
import model.{ ISBN, Book, Author }
import org.scalatra._
import org.scalatra.json._
import org.scalatra.FutureSupport
import org.scalatra.swagger.{ Swagger, SwaggerSupport, StringResponseMessage }
import org.json4s._
import scala.concurrent.{ ExecutionContext, Future }
import scala.concurrent.ExecutionContext.Implicits.global
import scala.language.postfixOps
import scala.util.{ Try, Success, Failure }
import slick.driver.H2Driver.api._
/**
* Main Scalatra API control.
*
* This should be attached at the `/api/` route and handles the
* books and authors APIs.
*
* @author Hawk Weisman
* @since v0.1.0
*/
case class APIController(db: Database)(implicit val swagger: Swagger)
extends AlexandriaStack
with NativeJsonSupport
with SwaggerSupport
with FutureSupport {
protected implicit def executor: ExecutionContext = global
// Sets up automatic case class to JSON output serialization
protected implicit lazy val jsonFormats: Formats = DefaultFormats + BookSerializer + AuthorSerializer
// "description" string for Swagger
override protected val applicationName: Option[String] = Some("Books")
protected val applicationDescription = "Alexandria Books API"
// Before every action runs, set the content type to be in JSON format.
before() {
contentType = formats("json")
response.setHeader("X-Clacks-Overhead", "GNU Terry Pratchett")
}
// ---- Book API actions ---------------------------------------------------
val getByISBN = (apiOperation[Book]("getBookByISBN")
summary "Get a specific book by ISBN"
notes """Get a specific book by ISBN. If the user has book creation privileges
|and the ISBN is unrecognized, the book definition is pulled from the
|Open Library API and stored in the database before returning a book
|object as normal (but with a different status). If the user doesn't
|have book creation privileges and the ISBN is unrecognized, a 404
|is returned.""".stripMargin.replaceAll("\\n", " ")
responseMessage ModelResponseMessage(200, "Book returned", "Book")
responseMessage ModelResponseMessage(201, "Book created", "Book")
responseMessage StringResponseMessage(404, "Book not found")
responseMessage ModelResponseMessage(400, "Invalid ISBN", "ErrorModel")
parameters
pathParam[String]("isbn")
.description("ISBN number of the book to look up")
.required
)
val deleteByISBN = (apiOperation[Unit]("deleteBookByISBN")
summary "Delete a specific book by ISBN."
responseMessage StringResponseMessage(204, "Book deleted")
parameters
pathParam[String]("isbn")
.description("ISBN number of the book to delete")
.required
)
val listBooks = (apiOperation[Seq[Book]]("listBooks")
summary "Get a list of all books."
parameters (
queryParam[Int]("offset")
.description("The starting number of the books to retrieve")
.optional
.defaultValue(0),
queryParam[Int]("count")
.description("The number of books to retrieve")
.optional
.defaultValue(10),
queryParam[String]("sort-by")
.description("""How to sort the returned list. Options are "title" for alphabetical order by title and "date" for publication date.""")
.optional
)
)
val createBook = (apiOperation[Book]("createBook")
summary "Create a new book"
parameters
bodyParam[Book]("book")
.description("The book to be added to the library")
.required
)
// book API routes -------------------------------------------------------
get("/book/:isbn", operation(getByISBN)) {
logger debug s"Handling book request for ${params("isbn")}"
ISBN parse params("isbn") match {
case Success(isbn) =>
logger debug s"Successfully parsed ISBN $isbn"
val bookQuery: Future[Option[Book]] = db run booksByISBN(isbn)
.result
.headOption
new AsyncResult {
val is = bookQuery map {
case Some(book: Book) => // book exists in DB
logger info s"Found '${book.title}' for ISBN $isbn, sending to client"
Ok(book) // return 200 OK
case None => // book does not exist, but query was executed successfully
logger debug s"Could not find book for ISBN $isbn, querying OpenLibrary"
isbn.authors flatMap { newAuthors: Seq[Author] =>
logger info s"Found authors ${newAuthors mkString ", "}, inserting into DB"
db.run(authors ++= newAuthors)
} flatMap { (_) =>
isbn.book
} flatMap { book: Book =>
logger info s"Found book' ${book.title}', inserting into DB"
db.run(books += book) map { _ => Created(book) }
}
} recover {
case why: Throwable =>
logger error s"Could not create book: $why\\n${why.stackTraceString}"
InternalServerError(ErrorModel fromException (500, why))
}
}
case Failure(why) =>
logger warn s"Invalid ISBN: ${why.getMessage}\\n${why.stackTraceString}"
BadRequest(ErrorModel.fromException(400, why))
}
}
delete("/book/:isbn", operation(deleteByISBN)) {
NotImplemented("This isn't done yet.")
}
get("/books/?", operation(listBooks)) {
val offset: Int = params.get("offset") flatMap {
p: String => Try(p.toInt) toOption
} getOrElse 0
val count: Int = params.get("count") flatMap {
p: String => Try(p.toInt) toOption
} getOrElse 10
// build query
val query = params get "sort-by" match {
case Some("title")
if count > 0 => sortedBooksTitleCount(count, offset).result
case Some("title") => sortedBooksTitle(offset).result
case Some("date") => ??? // TODO: this requires dates to be parsed as times
case Some(thing) => halt(400, ErrorModel(400, s"Invalid sort-by param '$thing'."))
case None // TODO: add compiled queries for offset/count
if count > 0 => books.drop(offset).take(count).result
case None => books.drop(offset).result
}
new AsyncResult {
val is = db run query map { books =>
logger debug "Successfully got list of books"
Ok(books)
} recover {
case why: Throwable =>
InternalServerError(ErrorModel fromException (500, why))
}
}
}
post("/books/?", operation(createBook)) {
val newBook: Future[Book] = Future(parse(params("book")).extract[Book])
val query: Future[Book] = newBook flatMap { book =>
db run (books += book) map { _ => book }
}
new AsyncResult {
val is = query map { book => // TODO: what if the book was already in the DB?
logger debug s"Added book $book to database"
Created(book)
} recover {
case _: NoSuchElementException =>
BadRequest(ErrorModel(400, "No book data was sent"))
case why: MappingException =>
BadRequest(ErrorModel fromException (400, why))
case why: Throwable =>
InternalServerError(ErrorModel fromException (500, why))
}
}
}
// ---- Author API actions -------------------------------------------------
val listAuthors = (apiOperation[Seq[Author]]("listAuthors")
summary "Get all authors"
notes "Why would you want to do this? I really don't think you want this."
parameters (
queryParam[Int]("offset")
.description("The starting number of the authors to retrieve")
.optional
.defaultValue(0),
queryParam[Int]("count")
.description("The number of authors to retrieve")
.optional
.defaultValue(10),
queryParam[String]("sort-by")
.description("""How to sort the returned list. Options are "first" for first name and "last" for last name.""")
.optional
)
)
val createAuthor = (apiOperation[Author]("createAuthor")
summary "Create a new author"
responseMessage ModelResponseMessage(201, "Author added", "Author")
parameters bodyParam[Author]("author")
.description("The author to be added")
.required)
val getAuthorByName = (apiOperation[Author]("getAuthorByName")
summary "Get a specific author by name."
responseMessage ModelResponseMessage(200, "Author returned", "Author")
responseMessage StringResponseMessage(404, "Author not found")
parameters
pathParam[String]("name")
.description("The author's name")
.required
)
get("/authors/?", operation(listAuthors)) {
val offset: Int = params get "offset" flatMap {
p: String => Try(p.toInt) toOption
} getOrElse 0
val count: Int = params get "count" flatMap {
p: String => Try(p.toInt) toOption
} getOrElse 10
val query = params get "sort-by" match {
case Some("first")
if count > 0 => sortedAuthorsFirstCount(count, offset).result
case Some("first") => sortedAuthorsFirst(offset).result
case Some("last")
if count > 0 => sortedAuthorsLastCount(count, offset).result
case Some("last") => sortedAuthorsLast(offset).result
case None
if count > 0 => authors.drop(offset).take(count).result
case None => authors.drop(offset).result
case Some(thing) => halt(400, ErrorModel(400, s"Invalid sort-by param '$thing'."))
}
new AsyncResult {
val is = db run query map { authors =>
logger debug "Successfully got list of authors"
Ok(authors)
} recover {
case why: Throwable =>
InternalServerError(ErrorModel fromException (500, why))
}
}
}
post("/authors/?", operation(createAuthor)) {
NotImplemented("This isn't done yet.")
}
get("/author/:name", operation(getAuthorByName)) {
val name: Option[Array[String]] = params.get("name") map { _ split "-" }
val first = name map { _.head } getOrElse halt(400, "No first name")
val last = name map { _.last } getOrElse halt(400, "No last name")
val query = db run authorByName(first,last)
.result
.headOption
new AsyncResult { val is = query map {
case Some(author) => Ok(author)
case None => NotFound(
ErrorModel(404, "No authors found matching requested name"))
} recover {
case why: Throwable =>
InternalServerError(ErrorModel fromException (500, why))
}
}
}
}
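// A small, self-contained sketch (not part of the controller above) of the
// query-parameter handling used by the /books and /authors listings: optional
// "offset"/"count" parameters fall back to 0 and 10 when missing or non-numeric.
// The object and method names below are illustrative only.
object PagingParamsSketch {
  import scala.util.Try

  def paging(params: Map[String, String]): (Int, Int) = {
    val offset = params.get("offset").flatMap(p => Try(p.toInt).toOption).getOrElse(0)
    val count  = params.get("count").flatMap(p => Try(p.toInt).toOption).getOrElse(10)
    (offset, count)
  }
}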
|
alexandrialibrary/Alexandria
|
src/main/scala/me/hawkweisman/alexandria/controllers/APIController.scala
|
Scala
|
mit
| 10,723 |
/*
* Copyright (C) 2011 Mikołaj Sochacki mikolajsochacki AT gmail.com
* This file is part of VRegister (Virtual Register - Wirtualny Dziennik)
*
* VRegister is free software: you can redistribute it and/or modify
* it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE Version 3
* as published by the Free Software Foundation
*
* VRegister is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU AFFERO GENERAL PUBLIC LICENSE
* along with VRegister. If not, see <http://www.gnu.org/licenses/>.
*/
package eu.brosbit.opos.model
import net.liftweb.mapper._
import _root_.net.liftweb.util._
import _root_.net.liftweb.common._
class UserChangeList extends LongKeyedMapper[UserChangeList] with IdPK {
def getSingleton = UserChangeList
object firstName extends MappedString(this, 30)
object lastName extends MappedString(this, 40)
object email extends MappedEmail(this, 48)
object passStr extends MappedString(this, 12)
object phone extends MappedString(this, 12)
object date extends MappedDate(this)
object user extends MappedLongForeignKey(this, User)
}
object UserChangeList extends UserChangeList with LongKeyedMetaMapper[UserChangeList] {
}
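// A hedged usage sketch (not from the original file): with Lift's Mapper API a
// change-request row can be created and persisted roughly as below. The field
// values are illustrative, and linking the row to a User via the `user` foreign
// key is left out to keep the sketch minimal.
object UserChangeListSketch {
  def recordChange(): Boolean =
    UserChangeList.create
      .firstName("Jan")
      .lastName("Kowalski")
      .email("jan.kowalski@example.com")
      .phone("123456789")
      .date(new java.util.Date())
      .save
}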
|
mikolajs/osp
|
src/main/scala/eu/brosbit/opos/model/UserChangeList.scala
|
Scala
|
agpl-3.0
| 1,431 |
/**
* Copyright (c) 2014-2016 Snowplow Analytics Ltd.
* All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache
* License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied.
*
* See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.storage
import org.json4s.JValue
/**
* Format in which Snowplow events are buffered
*
* @param json The JSON string for the event
* @param id The event ID (if it exists)
*/
final case class JsonRecord(json: JValue, id: Option[String])
|
TimothyKlim/snowplow
|
4-storage/kafka-elasticsearch-sink/src/main/scala/com/snowplowanalytics/snowplow/storage/JsonRecord.scala
|
Scala
|
apache-2.0
| 1,027 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.hadoop
import org.specs.Specification
import com.twitter.zipkin.gen
import com.twitter.scalding._
import gen.AnnotationType
import com.twitter.zipkin.hadoop.sources.{PreprocessedSpanSource, TimeGranularity, Util}
import scala.collection.JavaConverters._
import scala.collection.mutable._
/**
* Tests that PopularAnnotations finds the most popular annotations per service
*/
class PopularAnnotationsSpec extends Specification with TupleConversions {
noDetailedDiffs()
implicit val dateRange = DateRange(RichDate(123), RichDate(321))
val endpoint = new gen.Endpoint(123, 666, "service")
val endpoint1 = new gen.Endpoint(123, 666, "service1")
val span = new gen.SpanServiceName(12345, "methodcall", 666,
List(new gen.Annotation(1000, "sr").setHost(endpoint)).asJava,
List(new gen.BinaryAnnotation("hi", null, AnnotationType.BOOL)).asJava, "service")
val span1 = new gen.SpanServiceName(12345, "methodcall", 666,
List(new gen.Annotation(1000, "sr").setHost(endpoint), new gen.Annotation(2000, "cr").setHost(endpoint)).asJava,
List(new gen.BinaryAnnotation("bye", null, AnnotationType.BOOL)).asJava, "service1")
val span2 = new gen.SpanServiceName(12345, "methodcall", 666,
List(new gen.Annotation(2000, "cr").setHost(endpoint)).asJava,
List(new gen.BinaryAnnotation("bye", null, AnnotationType.BOOL)).asJava, "service1")
"PopularAnnotations" should {
"return a map with correct entries for each service" in {
JobTest("com.twitter.zipkin.hadoop.PopularAnnotations").
arg("input", "inputFile").
arg("output", "outputFile").
arg("date", "2012-01-01T01:00").
source(PreprocessedSpanSource(TimeGranularity.Day), Util.repeatSpan(span, 101, 0, 0) ::: Util.repeatSpan(span1, 50, 200, 0) ::: Util.repeatSpan(span2, 10, 500, 0)).
sink[(String, String)](Tsv("outputFile")) {
val map = new HashMap[String, List[String]]()
map("service") = Nil
map("service1") = Nil
outputBuffer => outputBuffer foreach { e =>
map(e._1) ::= e._2
}
map("service") mustEqual List("sr")
map("service1") mustEqual List("sr", "cr")
}.run.finish
}
}
}
|
devcamcar/zipkin
|
zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/PopularAnnotationsSpec.scala
|
Scala
|
apache-2.0
| 2,802 |
package mesosphere.mesos.simulation
import java.util
import akka.actor.{ ActorRef, ActorSystem, Props }
import com.typesafe.config.{ Config, ConfigFactory }
import org.apache.mesos.Protos._
import org.apache.mesos.SchedulerDriver
import org.slf4j.LoggerFactory
import scala.collection.JavaConversions._
/**
* The facade to the mesos simulation.
*
* It starts/stops a new actor system for the simulation when the corresponding life-cycle methods of the
* [[org.apache.mesos.SchedulerDriver]] interface are called.
*
* The implemented commands of the driver interface are forwarded as messages to the
* [[mesosphere.mesos.simulation.DriverActor]].
* Unimplemented methods throw [[scala.NotImplementedError]]s.
*/
class SimulatedDriver(driverProps: Props) extends SchedulerDriver {
private[this] val log = LoggerFactory.getLogger(getClass)
private[this] def driverCmd(cmd: AnyRef): Status = {
driverActorRefOpt match {
case Some(driverActor) =>
log.debug(s"send driver cmd $cmd")
driverActor ! cmd
case None =>
log.debug("no driver actor configured")
}
status
}
override def declineOffer(offerId: OfferID): Status =
driverCmd(DriverActor.DeclineOffer(offerId))
override def launchTasks(offerIds: util.Collection[OfferID], tasks: util.Collection[TaskInfo]): Status =
driverCmd(DriverActor.LaunchTasks(offerIds.toSeq, tasks.toSeq))
override def killTask(taskId: TaskID): Status = driverCmd(DriverActor.KillTask(taskId))
override def reconcileTasks(statuses: util.Collection[TaskStatus]): Status = {
driverCmd(DriverActor.ReconcileTask(statuses.toSeq))
}
override def declineOffer(offerId: OfferID, filters: Filters): Status = Status.DRIVER_RUNNING
override def launchTasks(offerIds: util.Collection[OfferID], tasks: util.Collection[TaskInfo],
filters: Filters): Status = ???
override def launchTasks(offerId: OfferID, tasks: util.Collection[TaskInfo], filters: Filters): Status = ???
override def launchTasks(offerId: OfferID, tasks: util.Collection[TaskInfo]): Status = ???
override def requestResources(requests: util.Collection[Request]): Status = ???
override def sendFrameworkMessage(executorId: ExecutorID, slaveId: SlaveID, data: Array[Byte]): Status = ???
override def reviveOffers(): Status = ???
override def acknowledgeStatusUpdate(status: TaskStatus): Status = ???
// Mesos 0.23.x
// override def acceptOffers(
// o: util.Collection[OfferID], ops: util.Collection[Offer.Operation], filters: Filters): Status = ???
// life cycle
@volatile
var system: Option[ActorSystem] = None
@volatile
var driverActorRefOpt: Option[ActorRef] = None
private def status: Status = system match {
case None => Status.DRIVER_STOPPED
case Some(_) => Status.DRIVER_RUNNING
}
override def start(): Status = {
log.info("Starting simulated Mesos")
val config: Config = ConfigFactory.load(getClass.getClassLoader, "mesos-simulation.conf")
val sys: ActorSystem = ActorSystem("mesos-simulation", config)
system = Some(sys)
driverActorRefOpt = Some(sys.actorOf(driverProps, "driver"))
driverCmd(this)
Status.DRIVER_RUNNING
}
override def stop(failover: Boolean): Status = stop()
override def stop(): Status = abort()
override def abort(): Status = {
system match {
case None => Status.DRIVER_NOT_STARTED
case Some(sys) =>
sys.shutdown()
Status.DRIVER_ABORTED
}
}
override def run(): Status = {
start()
join()
}
override def join(): Status = {
system match {
case None => Status.DRIVER_NOT_STARTED
case Some(sys) =>
sys.awaitTermination()
driverActorRefOpt = None
system = None
log.info("Stopped simulated Mesos")
Status.DRIVER_STOPPED
}
}
}
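// A minimal usage sketch (not part of the original file): the simulated driver is
// exercised through the standard SchedulerDriver life cycle described above. The
// `driverProps` parameter is an assumed Props for a DriverActor instance.
object SimulatedDriverSketch {
  def runSimulation(driverProps: Props): Status = {
    val driver = new SimulatedDriver(driverProps)
    driver.start() // boots the "mesos-simulation" actor system and the driver actor
    driver.join()  // blocks until the actor system terminates, then reports DRIVER_STOPPED
  }
}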
|
bsideup/marathon
|
mesos-simulation/src/main/scala/mesosphere/mesos/simulation/SimulatedDriver.scala
|
Scala
|
apache-2.0
| 3,862 |
package controllers
import play.api.data.Form
import play.api.data.Forms._
import play.api.libs.json._
import play.api.mvc._
import scala.concurrent.duration._
import views._
import lila.api.Context
import lila.app._
import lila.common.config.MaxPerSecond
import lila.team.{ Requesting, Team => TeamModel }
import lila.user.{ User => UserModel, Holder }
import lila.memo.RateLimit
final class Team(
env: Env,
apiC: => Api
) extends LilaController(env) {
private def forms = env.team.forms
private def api = env.team.api
private def paginator = env.team.paginator
def all(page: Int) =
Open { implicit ctx =>
Reasonable(page) {
paginator popularTeams page map {
html.team.list.all(_)
}
}
}
def home(page: Int) =
Open { implicit ctx =>
ctx.me.??(api.hasTeams) map {
case true => Redirect(routes.Team.mine)
case false => Redirect(routes.Team.all(page))
}
}
def show(id: String, page: Int, mod: Boolean) =
Open { implicit ctx =>
Reasonable(page) {
OptionFuOk(api team id) { renderTeam(_, page, mod) }
}
}
def search(text: String, page: Int) =
OpenBody { implicit ctx =>
Reasonable(page) {
if (text.trim.isEmpty) paginator popularTeams page map { html.team.list.all(_) }
else
env.teamSearch(text, page) map { html.team.list.search(text, _) }
}
}
private def renderTeam(team: TeamModel, page: Int = 1, requestModView: Boolean = false)(implicit
ctx: Context
) =
for {
info <- env.teamInfo(team, ctx.me, withForum = canHaveForum(team, requestModView))
members <- paginator.teamMembers(team, page)
hasChat = canHaveChat(team, info, requestModView)
chat <-
hasChat ?? env.chat.api.userChat.cached
.findMine(lila.chat.Chat.Id(team.id), ctx.me)
.map(some)
_ <- env.user.lightUserApi preloadMany {
info.userIds ::: chat.??(_.chat.userIds)
}
version <- hasChat ?? env.team.version(team.id).dmap(some)
} yield html.team.show(team, members, info, chat, version, requestModView)
private def canHaveChat(team: TeamModel, info: lila.app.mashup.TeamInfo, requestModView: Boolean = false)(
implicit ctx: Context
): Boolean =
team.enabled && !team.isChatFor(_.NONE) && ctx.noKid && {
(team.isChatFor(_.LEADERS) && ctx.userId.exists(team.leaders)) ||
(team.isChatFor(_.MEMBERS) && info.mine) ||
(isGranted(_.ChatTimeout) && requestModView)
}
private def canHaveForum(team: TeamModel, requestModView: Boolean)(isMember: Boolean)(implicit
ctx: Context
): Boolean =
team.enabled && !team.isForumFor(_.NONE) && ctx.noKid && {
(team.isForumFor(_.LEADERS) && ctx.userId.exists(team.leaders)) ||
(team.isForumFor(_.MEMBERS) && isMember) ||
(isGranted(_.ModerateForum) && requestModView)
}
def users(teamId: String) =
AnonOrScoped(_.Team.Read) { req => me =>
api teamEnabled teamId flatMap {
_ ?? { team =>
val canView: Fu[Boolean] =
if (team.publicMembers) fuccess(true)
else me.??(u => api.belongsTo(team.id, u.id))
canView map {
case true =>
apiC.jsonStream(
env.team
.memberStream(team, MaxPerSecond(20))
.map(env.api.userApi.one(_, withOnline = false))
)(req)
case false => Unauthorized
}
}
}
}
def tournaments(teamId: String) =
Open { implicit ctx =>
api teamEnabled teamId flatMap {
_ ?? { team =>
env.teamInfo.tournaments(team, 30, 30) map { tours =>
Ok(html.team.tournaments.page(team, tours))
}
}
}
}
def edit(id: String) =
Auth { implicit ctx => _ =>
WithOwnedTeamEnabled(id) { team =>
fuccess(html.team.form.edit(team, forms edit team))
}
}
def update(id: String) =
AuthBody { implicit ctx => me =>
WithOwnedTeamEnabled(id) { team =>
implicit val req = ctx.body
forms
.edit(team)
.bindFromRequest()
.fold(
err => BadRequest(html.team.form.edit(team, err)).fuccess,
data => api.update(team, data, me) inject Redirect(routes.Team.show(team.id)).flashSuccess
)
}
}
def kickForm(id: String) =
Auth { implicit ctx => _ =>
WithOwnedTeamEnabled(id) { team =>
Ok(html.team.admin.kick(team, forms.members)).fuccess
}
}
def kick(id: String) =
AuthBody { implicit ctx => me =>
WithOwnedTeamEnabled(id) { team =>
implicit val req = ctx.body
forms.members.bindFromRequest().value ?? { api.kickMembers(team, _, me).sequenceFu } inject Redirect(
routes.Team.show(team.id)
).flashSuccess
}
}
def kickUser(teamId: String, userId: String) =
Scoped(_.Team.Write) { _ => me =>
api teamEnabled teamId flatMap {
_ ?? { team =>
if (team leaders me.id) api.kick(team, userId, me) inject jsonOkResult
else Forbidden(jsonError("Not your team")).fuccess
}
}
}
def leadersForm(id: String) =
Auth { implicit ctx => _ =>
WithOwnedTeamEnabled(id) { team =>
Ok(html.team.admin.leaders(team, forms leaders team)).fuccess
}
}
def leaders(id: String) =
AuthBody { implicit ctx => me =>
WithOwnedTeamEnabled(id) { team =>
implicit val req = ctx.body
forms.leaders(team).bindFromRequest().value ?? {
api.setLeaders(team, _, me, isGranted(_.ManageTeam))
} inject Redirect(
routes.Team.show(team.id)
).flashSuccess
}
}
def close(id: String) =
Secure(_.ManageTeam) { implicit ctx => me =>
OptionFuResult(api team id) { team =>
api.delete(team) >>
env.mod.logApi.deleteTeam(me.id, team.id, team.name) inject
Redirect(routes.Team all 1).flashSuccess
}
}
def disable(id: String) =
Auth { implicit ctx => me =>
WithOwnedTeamEnabled(id) { team =>
api.toggleEnabled(team, me) >>
env.mod.logApi.disableTeam(me.id, team.id, team.name) inject
Redirect(routes.Team show id).flashSuccess
}
}
def form =
Auth { implicit ctx => me =>
LimitPerWeek(me) {
forms.anyCaptcha map { captcha =>
Ok(html.team.form.create(forms.create, captcha))
}
}
}
def create =
AuthBody { implicit ctx => implicit me =>
api hasJoinedTooManyTeams me flatMap { tooMany =>
if (tooMany) tooManyTeams(me)
else
LimitPerWeek(me) {
implicit val req = ctx.body
forms.create
.bindFromRequest()
.fold(
err =>
forms.anyCaptcha map { captcha =>
BadRequest(html.team.form.create(err, captcha))
},
data =>
api.create(data, me) map { team =>
Redirect(routes.Team.show(team.id)).flashSuccess
}
)
}
}
}
def mine =
Auth { implicit ctx => me =>
api mine me map {
html.team.list.mine(_)
}
}
private def tooManyTeams(me: UserModel)(implicit ctx: Context) =
api mine me map html.team.list.mine map { BadRequest(_) }
def leader =
Auth { implicit ctx => me =>
env.team.teamRepo enabledTeamsByLeader me.id map {
html.team.list.ledByMe(_)
}
}
def join(id: String) =
AuthOrScopedBody(_.Team.Write)(
auth = implicit ctx =>
me =>
api.teamEnabled(id) flatMap {
_ ?? { team =>
api hasJoinedTooManyTeams me flatMap { tooMany =>
if (tooMany)
negotiate(
html = tooManyTeams(me),
api = _ => BadRequest(jsonError("You have joined too many teams")).fuccess
)
else
negotiate(
html = webJoin(team, me, request = none, password = none),
api = _ => {
implicit val body = ctx.body
forms
.apiRequest(team)
.bindFromRequest()
.fold(
newJsonFormError,
setup =>
api.join(team, me, setup.message, setup.password) flatMap {
case Requesting.Joined => jsonOkResult.fuccess
case Requesting.NeedRequest =>
BadRequest(jsonError("This team requires confirmation.")).fuccess
case Requesting.NeedPassword =>
BadRequest(jsonError("This team requires a password.")).fuccess
case _ => notFoundJson("Team not found")
}
)
}
)
}
}
},
scoped = implicit req =>
me =>
api.team(id) flatMap {
_ ?? { team =>
implicit val lang = reqLang
forms
.apiRequest(team)
.bindFromRequest()
.fold(
newJsonFormError,
setup =>
api.join(team, me, setup.message, setup.password) flatMap {
case Requesting.Joined => jsonOkResult.fuccess
case Requesting.NeedPassword =>
Forbidden(jsonError("This team requires a password.")).fuccess
case Requesting.NeedRequest =>
Forbidden(
jsonError(
"This team requires confirmation, and is not owned by the oAuth app owner."
)
).fuccess
}
)
}
}
)
def subscribe(teamId: String) = {
def doSub(req: Request[_], me: UserModel) =
Form(single("subscribe" -> optional(boolean)))
.bindFromRequest()(req, formBinding)
.fold(_ => funit, v => api.subscribe(teamId, me.id, ~v))
AuthOrScopedBody(_.Team.Write)(
auth = ctx => me => doSub(ctx.body, me) inject jsonOkResult,
scoped = req => me => doSub(req, me) inject jsonOkResult
)
}
def requests =
Auth { implicit ctx => me =>
import lila.memo.CacheApi._
env.team.cached.nbRequests invalidate me.id
api requestsWithUsers me map { html.team.request.all(_) }
}
def requestForm(id: String) =
Auth { implicit ctx => me =>
OptionFuOk(api.requestable(id, me)) { team =>
fuccess(html.team.request.requestForm(team, forms.request(team)))
}
}
def requestCreate(id: String) =
AuthBody { implicit ctx => me =>
OptionFuResult(api.requestable(id, me)) { team =>
implicit val req = ctx.body
forms
.request(team)
.bindFromRequest()
.fold(
err => BadRequest(html.team.request.requestForm(team, err)).fuccess,
setup =>
if (team.open) webJoin(team, me, request = none, password = setup.password)
else
setup.message ?? { msg =>
api.createRequest(team, me, msg) inject Redirect(routes.Team.show(team.id)).flashSuccess
}
)
}
}
private def webJoin(team: TeamModel, me: UserModel, request: Option[String], password: Option[String]) =
api.join(team, me, request = request, password = password) flatMap {
case Requesting.Joined => Redirect(routes.Team.show(team.id)).flashSuccess.fuccess
case Requesting.NeedRequest | Requesting.NeedPassword =>
Redirect(routes.Team.requestForm(team.id)).flashSuccess.fuccess
}
def requestProcess(requestId: String) =
AuthBody { implicit ctx => me =>
import cats.implicits._
OptionFuRedirectUrl(for {
requestOption <- api request requestId
teamOption <- requestOption.??(req => env.team.teamRepo.byLeader(req.team, me.id))
} yield (teamOption, requestOption).mapN((_, _))) { case (team, request) =>
implicit val req = ctx.body
forms.processRequest
.bindFromRequest()
.fold(
_ => fuccess(routes.Team.show(team.id).toString),
{ case (decision, url) =>
api.processRequest(team, request, decision) inject url
}
)
}
}
def declinedRequests(id: String, page: Int) =
Auth { implicit ctx => _ =>
WithOwnedTeamEnabled(id) { team =>
paginator.declinedRequests(team, page) map { requests =>
Ok(html.team.declinedRequest.all(team, requests))
}
}
}
def quit(id: String) =
AuthOrScoped(_.Team.Write)(
auth = implicit ctx =>
me =>
OptionFuResult(api.cancelRequest(id, me) orElse api.quit(id, me)) { team =>
negotiate(
html = Redirect(routes.Team.mine).flashSuccess.fuccess,
api = _ => jsonOkResult.fuccess
)
}(ctx),
scoped = _ =>
me =>
api.quit(id, me) flatMap {
_.fold(notFoundJson())(_ => jsonOkResult.fuccess)
}
)
def autocomplete =
Action.async { req =>
get("term", req).filter(_.nonEmpty) match {
case None => BadRequest("No search term provided").fuccess
case Some(term) =>
for {
teams <- api.autocomplete(term, 10)
_ <- env.user.lightUserApi preloadMany teams.map(_.createdBy)
} yield JsonOk {
JsArray(teams map { team =>
Json.obj(
"id" -> team.id,
"name" -> team.name,
"owner" -> env.user.lightUserApi.sync(team.createdBy).fold(team.createdBy)(_.name),
"members" -> team.nbMembers
)
})
}
}
}
def pmAll(id: String) =
Auth { implicit ctx => _ =>
WithOwnedTeamEnabled(id) { team =>
env.tournament.api
.visibleByTeam(team.id, 0, 20)
.dmap(_.next)
.map { tours =>
Ok(html.team.admin.pmAll(team, forms.pmAll, tours))
}
}
}
def pmAllSubmit(id: String) =
AuthOrScopedBody(_.Team.Write)(
auth = implicit ctx =>
me =>
WithOwnedTeamEnabled(id) { team =>
doPmAll(team, me)(ctx.body).fold(
err =>
env.tournament.api
.visibleByTeam(team.id, 0, 20)
.dmap(_.next)
.map { tours =>
BadRequest(html.team.admin.pmAll(team, err, tours))
},
_ map { res =>
Redirect(routes.Team.show(team.id))
.flashing(res match {
case RateLimit.Through => "success" -> ""
case RateLimit.Limited => "failure" -> rateLimitedMsg
})
}
)
},
scoped = implicit req =>
me =>
api teamEnabled id flatMap {
_.filter(_ leaders me.id) ?? { team =>
doPmAll(team, me).fold(
err => BadRequest(errorsAsJson(err)(reqLang)).fuccess,
_ map {
case RateLimit.Through => jsonOkResult
case RateLimit.Limited => rateLimitedJson
}
)
}
}
)
// API
def apiAll(page: Int) =
Action.async {
import env.team.jsonView._
import lila.common.paginator.PaginatorJson._
JsonOk {
paginator popularTeams page flatMap { pager =>
env.user.lightUserApi.preloadMany(pager.currentPageResults.flatMap(_.leaders)) inject pager
}
}
}
def apiShow(id: String) =
Open { ctx =>
JsonOptionOk {
api teamEnabled id flatMap {
_ ?? { team =>
for {
joined <- ctx.userId.?? { api.belongsTo(id, _) }
requested <- ctx.userId.ifFalse(joined).?? { env.team.requestRepo.exists(id, _) }
} yield {
env.team.jsonView.teamWrites.writes(team) ++ Json
.obj(
"joined" -> joined,
"requested" -> requested
)
}.some
}
}
}
}
def apiSearch(text: String, page: Int) =
Action.async {
import env.team.jsonView._
import lila.common.paginator.PaginatorJson._
JsonOk {
if (text.trim.isEmpty) paginator popularTeams page
else env.teamSearch(text, page)
}
}
def apiTeamsOf(username: String) =
Action.async {
import env.team.jsonView._
JsonOk {
api teamsOf username flatMap { teams =>
env.user.lightUserApi.preloadMany(teams.flatMap(_.leaders)) inject teams
}
}
}
private def doPmAll(team: TeamModel, me: UserModel)(implicit
req: Request[_]
): Either[Form[_], Fu[RateLimit.Result]] =
forms.pmAll
.bindFromRequest()
.fold(
err => Left(err),
msg =>
Right {
PmAllLimitPerTeam[RateLimit.Result](team.id, if (me.isVerifiedOrAdmin) 1 else pmAllCost) {
val url = s"${env.net.baseUrl}${routes.Team.show(team.id)}"
val full = s"""$msg
---
You received this because you are subscribed to messages of the team $url."""
env.msg.api
.multiPost(Holder(me), env.team.memberStream.subscribedIds(team, MaxPerSecond(50)), full)
.addEffect { nb =>
lila.mon.msg.teamBulk(team.id).record(nb).unit
}
// we don't wait for the stream to complete, it would make lichess time out
fuccess(RateLimit.Through)
}(RateLimit.Limited)
}
)
private val pmAllCost = 5
private val PmAllLimitPerTeam = env.memo.mongoRateLimitApi[lila.team.Team.ID](
"team.pm.all",
credits = 7 * pmAllCost,
duration = 7.days
)
private def LimitPerWeek[A <: Result](me: UserModel)(a: => Fu[A])(implicit ctx: Context): Fu[Result] =
api.countCreatedRecently(me) flatMap { count =>
val allow =
isGranted(_.ManageTeam) ||
(isGranted(_.Verified) && count < 100) ||
(isGranted(_.Teacher) && count < 10) ||
count < 3
if (allow) a
else Forbidden(views.html.site.message.teamCreateLimit).fuccess
}
private def WithOwnedTeam(teamId: String)(f: TeamModel => Fu[Result])(implicit ctx: Context): Fu[Result] =
OptionFuResult(api team teamId) { team =>
if (ctx.userId.exists(team.leaders.contains) || isGranted(_.ManageTeam)) f(team)
else renderTeam(team) map { Forbidden(_) }
}
private def WithOwnedTeamEnabled(
teamId: String
)(f: TeamModel => Fu[Result])(implicit ctx: Context): Fu[Result] =
WithOwnedTeam(teamId) { team =>
if (team.enabled || isGranted(_.ManageTeam)) f(team)
else notFound
}
}
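// A self-contained sketch (illustrative only, not part of the controller) of the
// team-creation allowance enforced by LimitPerWeek above: team managers are
// unrestricted, verified users may create up to 100 teams, teachers up to 10,
// and everyone else up to 3 within the counted period.
object TeamCreateLimitSketch {
  def allowed(isManager: Boolean, isVerified: Boolean, isTeacher: Boolean, createdRecently: Int): Boolean =
    isManager ||
      (isVerified && createdRecently < 100) ||
      (isTeacher && createdRecently < 10) ||
      createdRecently < 3
}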
|
luanlv/lila
|
app/controllers/Team.scala
|
Scala
|
mit
| 19,388 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.functions.KeySelector
import org.apache.flink.api.java.typeutils.ResultTypeQueryable
import org.apache.flink.table.runtime.types.CRow
import org.apache.flink.table.typeutils.TypeCheckUtils.validateEqualsHashCode
import org.apache.flink.types.Row
/**
* Null-aware key selector.
*/
class CRowKeySelector(
val keyFields: Array[Int],
@transient var returnType: TypeInformation[Row])
extends KeySelector[CRow, Row]
with ResultTypeQueryable[Row] {
// check if type implements proper equals/hashCode
validateEqualsHashCode("grouping", returnType)
override def getKey(value: CRow): Row = {
Row.project(value.row, keyFields)
}
override def getProducedType: TypeInformation[Row] = returnType
}
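// A minimal sketch (illustrative values, not part of the original file) of what the
// selector computes: the configured key fields are projected out of each row, so
// rows with equal values in those positions share the same grouping key.
object CRowKeySelectorSketch {
  def main(args: Array[String]): Unit = {
    val row = Row.of("user-1", Int.box(42), "2021-01-01")
    val key = Row.project(row, Array(0, 2)) // keep fields 0 and 2 as the key
    println(key) // the projected key row, e.g. "user-1,2021-01-01"
  }
}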
|
GJL/flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/runtime/CRowKeySelector.scala
|
Scala
|
apache-2.0
| 1,658 |
// copied from LMS to avoid depending on tests
// Apply any changes made there! To check:
// git log v0.9.0..HEAD test-src/epfl/test7-analysis/TestFusion.scala
// Don't forget to update the command after applying changes
package scalan.compilation.lms.arrays
import scala.lms.common.{IfThenElseFatExp, LoopFusionOpt, ScalaGenIfThenElseFat}
trait ScalaGenFatArrayLoopsFusionOpt extends ScalaGenArrayLoopsFat with ScalaGenIfThenElseFat with LoopFusionOpt {
val IR: ArrayLoopsFatExp with IfThenElseFatExp
import IR._
override def unapplySimpleIndex(e: Def[Any]) = e match {
case ArrayIndex(a, i) => Some((a,i))
case _ => super.unapplySimpleIndex(e)
}
override def unapplySimpleDomain(e: Def[Int]): Option[Exp[Any]] = e match {
case ArrayLength(a) => Some(a)
case _ => super.unapplySimpleDomain(e)
}
override def unapplySimpleCollect(e: Def[Any]) = e match {
case ArrayElem(Block(a)) => Some(a) //TODO: block??
case _ => super.unapplySimpleCollect(e)
}
override def unapplySimpleCollectIf(e: Def[Any]) = e match {
case ArrayIfElem(c,Block(a)) => Some((a,List(c))) //TODO: block?
case _ => super.unapplySimpleCollectIf(e)
}
override def applyAddCondition(e: Def[Any], c: List[Exp[Boolean]]) = e match { //TODO: should c be list or not?
case ArrayElem(a) if c.length == 1 => ArrayIfElem(c(0),a)
case ReduceElem(a) if c.length == 1 => ReduceIfElem(c(0),a)
case _ => super.applyAddCondition(e,c)
}
}
|
PCMNN/scalan-ce
|
lms-backend/core/src/main/scala/scalan/compilation/lms/arrays/ScalaGenFatArrayLoopsFusionOpt.scala
|
Scala
|
apache-2.0
| 1,469 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle.scalariform
import org.junit.Test
import org.scalastyle.file.CheckerTest
import org.scalatest.junit.AssertionsForJUnit
// scalastyle:off magic.number
class SpaceAfterCommentStartCheckerTest extends AssertionsForJUnit with CheckerTest {
override protected val key: String = "space.after.comment.start"
override protected val classUnderTest = classOf[SpaceAfterCommentStartChecker]
@Test def testSinglelineComments(): Unit = {
val source = """
package foobar
object Foobar {
//Incorrect
// correct comment
/////////////////////////////////
///Invalid
/// Invalid
}"""
assertErrors(List(columnError(5, 2), columnError(8, 2), columnError(9, 2)), source)
}
@Test def testMultipleInlineComments(): Unit = {
val source = """
package foobar
object Foobar {
//Incorrect
// correct comment//not wrong//check
val a = 10//Incorrect
val b = 100 //Incorrect
val c = 1// Correct
val d = 2 // Correct
val e = 3
}"""
assertErrors(List(columnError(5, 2), columnError(7, 12), columnError(8, 14)), source)
}
@Test def testMultilineComments(): Unit = {
val source = """
package foobar
object Foobar {
/*WRONG
*
*/
/* Correct */
/* Wrong*/
val d = 2 /*Wrong*/
/*
*Correct
*/
val e = 3/* Correct */
}"""
assertErrors(List(columnError(5, 2), columnError(9, 2), columnError(10, 12)), source)
}
@Test def testScaladocsComments(): Unit = {
val source = """
package foobar
object Foobar {
/**WRONG
*
*/
/** Correct */
val d = 2 /**Wrong*/
/** Wrong*/
/**
*Correct
*/
val e = 3/** Correct */
}"""
assertErrors(List(columnError(5, 2), columnError(9, 12), columnError(10, 2)), source)
}
@Test def testMixedComments(): Unit = {
val source = """
package foobar
object Foobar {
/**WRONG
*
*/
/** Correct */
val d = 2 /*Wrong*/ //Wrong
/**
*Correct
*/
val e = 3/** Correct */ // Correct
}"""
assertErrors(List(columnError(5, 2), columnError(9, 12), columnError(9, 22)), source)
}
}
|
scalastyle/scalastyle
|
src/test/scala/org/scalastyle/scalariform/SpaceAfterCommentStartCheckerTest.scala
|
Scala
|
apache-2.0
| 2,769 |
package org.bitcoins.core.protocol.blockchain
import org.bitcoins.core.consensus.Merkle
import org.bitcoins.core.crypto.DoubleSha256Digest
import org.bitcoins.core.currency.{CurrencyUnit, Satoshis}
import org.bitcoins.core.number.{Int64, UInt32, UInt64}
import org.bitcoins.core.protocol.CompactSizeUInt
import org.bitcoins.core.protocol.script.{ScriptPubKey, ScriptSignature}
import org.bitcoins.core.protocol.transaction.{Transaction, TransactionConstants, TransactionInput, TransactionOutput}
import org.bitcoins.core.script.constant.{BytesToPushOntoStack, ScriptConstant, ScriptNumber}
import org.bitcoins.core.script.crypto.OP_CHECKSIG
import org.bitcoins.core.util.BitcoinSUtil
/**
* Created by chris on 5/22/16.
* CChainParams defines various tweakable parameters of a given instance of the
* Bitcoin system. There are three: the main network on which people trade goods
* and services, the public test network which gets reset from time to time and
* a regression test mode which is intended for private networks only. It has
* minimal difficulty to ensure that blocks can be found instantly.
* Mimics this C++ interface
* https://github.com/bitcoin/bitcoin/blob/master/src/chainparams.h#L42
*/
sealed trait ChainParams {
/** Return the BIP70 network string ([[MainNetChainParams]], [[TestNetChainParams]] or [[RegTestNetChainParams]].) */
def networkId : String
/** The Genesis [[Block]] in the blockchain. */
def genesisBlock : Block
/** Filter transactions that do not match well-defined patterns
* inside of [[org.bitcoins.core.policy.Policy]]. */
def requireStandardTransaction : Boolean
/** Takes in a [[Base58Type]] and returns its base58 prefix. */
def base58Prefix(base58 : Base58Type) : Seq[Byte] = base58Prefixes(base58)
/** The mapping from a [[Base58Type]]to a String.
* Base58 prefixes for various keys/hashes on the network.
* See: [[https://en.bitcoin.it/wiki/List_of_address_prefixes]]. */
def base58Prefixes : Map[Base58Type,Seq[Byte]]
/** Creates the Genesis [[Block]] for this blockchain.
* Mimics this function in bitcoin core:
* [[https://github.com/bitcoin/bitcoin/blob/master/src/chainparams.cpp#L51]]
* @param time the time when the miner started hashing the block header
* @param nonce the nonce to mine the block
* @param nBits An encoded version of the target threshold this block’s header hash must be less than or equal to.
* @param version the block version
* @param amount the block reward for the genesis block (50 BTC in Bitcoin)
* @return the newly minted genesis block
*/
def createGenesisBlock(time : UInt32, nonce : UInt32, nBits : UInt32, version : UInt32, amount : CurrencyUnit) : Block = {
val timestamp = "The Times 03/Jan/2009 Chancellor on brink of second bailout for banks"
val asm = Seq(BytesToPushOntoStack(65), ScriptConstant("04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f"), OP_CHECKSIG)
val genesisOutputScript = ScriptPubKey.fromAsm(asm)
createGenesisBlock(timestamp,genesisOutputScript,time,nonce,nBits,version,amount)
}
/**
* @param timestamp a piece of data to signify when this block was first created - satoshi used an article headline
* @param scriptPubKey the scriptPubKey that needs to be satisfied in able to spend the genesis block reward
* @param time the time when the miner started hashing the block header
* @param nonce the nonce used to mine the block
* @param nBits An encoded version of the target threshold this block's header hash must be less than or equal to
* @param version the block version
* @param amount the block reward for the genesis block (50 BTC in Bitcoin)
* @return the newly minted genesis block
*/
def createGenesisBlock(timestamp : String, scriptPubKey : ScriptPubKey, time : UInt32, nonce : UInt32, nBits : UInt32,
version : UInt32, amount : CurrencyUnit) : Block = {
val timestampHex = timestamp.toCharArray.map(_.toByte)
//see https://bitcoin.stackexchange.com/questions/13122/scriptsig-coinbase-structure-of-the-genesis-block
//for a full breakdown of the genesis block & its script signature
val scriptSignature = ScriptSignature.fromAsm(Seq(BytesToPushOntoStack(4), ScriptNumber(486604799),
BytesToPushOntoStack(1), ScriptNumber(4), BytesToPushOntoStack(69), ScriptConstant(timestampHex)))
val input = TransactionInput(scriptSignature)
val output = TransactionOutput(amount,scriptPubKey)
val tx = Transaction(TransactionConstants.version,Seq(input), Seq(output), TransactionConstants.lockTime)
val prevBlockHash = DoubleSha256Digest("0000000000000000000000000000000000000000000000000000000000000000")
val merkleRootHash = Merkle.computeMerkleRoot(Seq(tx))
val genesisBlockHeader = BlockHeader(version,prevBlockHash,merkleRootHash,time,nBits,nonce)
val genesisBlock = Block(genesisBlockHeader,CompactSizeUInt(UInt64.one,1),Seq(tx))
genesisBlock
}
}
/** The Main Network parameters. */
object MainNetChainParams extends ChainParams {
override def networkId = "main"
override def genesisBlock : Block = createGenesisBlock(UInt32(1231006505), UInt32(2083236893), UInt32(0x1d00ffff), UInt32.one, Satoshis(Int64(5000000000L)))
override def requireStandardTransaction : Boolean = true
override def base58Prefixes : Map[Base58Type,Seq[Byte]] = Map(
PubKeyAddress -> BitcoinSUtil.decodeHex("00"),
ScriptAddress -> BitcoinSUtil.decodeHex("05"),
SecretKey -> BitcoinSUtil.decodeHex("80"),
ExtPublicKey -> Seq(BitcoinSUtil.hexToByte("04"), BitcoinSUtil.hexToByte("88"),
BitcoinSUtil.hexToByte("b2"), BitcoinSUtil.hexToByte("1e")),
ExtSecretKey -> Seq(BitcoinSUtil.hexToByte("04"), BitcoinSUtil.hexToByte("88"),
BitcoinSUtil.hexToByte("ad"), BitcoinSUtil.hexToByte("e4")))
}
object TestNetChainParams extends ChainParams {
override def networkId = "test"
override def genesisBlock : Block = createGenesisBlock(UInt32(1296688602), UInt32(414098458), UInt32(0x1d00ffff), UInt32.one, Satoshis(Int64(5000000000L)))
override def requireStandardTransaction : Boolean = true
override def base58Prefixes : Map[Base58Type,Seq[Byte]] = Map(
PubKeyAddress -> BitcoinSUtil.decodeHex("6f"),
ScriptAddress -> BitcoinSUtil.decodeHex("c4"),
SecretKey -> BitcoinSUtil.decodeHex("ef"),
ExtPublicKey -> Seq(BitcoinSUtil.hexToByte("04"), BitcoinSUtil.hexToByte("35"),
BitcoinSUtil.hexToByte("87"), BitcoinSUtil.hexToByte("cf")),
ExtSecretKey -> Seq(BitcoinSUtil.hexToByte("04"), BitcoinSUtil.hexToByte("35"),
BitcoinSUtil.hexToByte("83"), BitcoinSUtil.hexToByte("94")))
}
object RegTestNetChainParams extends ChainParams {
override def networkId = "regtest"
override def genesisBlock : Block = createGenesisBlock(UInt32(1296688602), UInt32(2), UInt32(0x207fffff), UInt32.one, Satoshis(Int64(5000000000L)))
override def requireStandardTransaction : Boolean = TestNetChainParams.requireStandardTransaction
override def base58Prefixes : Map[Base58Type, Seq[Byte]] = TestNetChainParams.base58Prefixes
}
sealed trait Base58Type
case object PubKeyAddress extends Base58Type
case object ScriptAddress extends Base58Type
case object SecretKey extends Base58Type
case object ExtPublicKey extends Base58Type
case object ExtSecretKey extends Base58Type
|
SuredBits/bitcoin-s-sidechains
|
src/main/scala/org/bitcoins/core/protocol/blockchain/ChainParams.scala
|
Scala
|
mit
| 7,439 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.io.{FileNotFoundException, IOException, OutputStream}
import java.util.{EnumSet, UUID}
import scala.util.control.NonFatal
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.hadoop.fs.local.{LocalFs, RawLocalFs}
import org.apache.hadoop.fs.permission.FsPermission
import org.apache.spark.internal.Logging
import org.apache.spark.sql.execution.streaming.CheckpointFileManager.RenameHelperMethods
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.util.Utils
/**
* An interface to abstract out all operation related to streaming checkpoints. Most importantly,
* the key operation this interface provides is `createAtomic(path, overwrite)` which returns a
* `CancellableFSDataOutputStream`. This method is used by [[HDFSMetadataLog]] and
* [[org.apache.spark.sql.execution.streaming.state.StateStore StateStore]] implementations
* to write a complete checkpoint file atomically (i.e. no partial file will be visible), with or
* without overwrite.
*
* This higher-level interface above the Hadoop FileSystem is necessary because
* different implementation of FileSystem/FileContext may have different combination of operations
* to provide the desired atomic guarantees (e.g. write-to-temp-file-and-rename,
* direct-write-and-cancel-on-failure) and this abstraction allow different implementations while
* keeping the usage simple (`createAtomic` -> `close` or `cancel`).
*/
trait CheckpointFileManager {
import org.apache.spark.sql.execution.streaming.CheckpointFileManager._
/**
* Create a file and make its contents available atomically after the output stream is closed.
*
* @param path Path to create
* @param overwriteIfPossible If true, then the implementations must do a best-effort attempt to
* overwrite the file if it already exists. It should not throw
* any exception if the file exists. However, if false, then the
* implementation must not overwrite if the file already exists and
* must throw `FileAlreadyExistsException` in that case.
*/
def createAtomic(path: Path, overwriteIfPossible: Boolean): CancellableFSDataOutputStream
/** Open a file for reading, or throw exception if it does not exist. */
def open(path: Path): FSDataInputStream
/** List the files in a path that match a filter. */
def list(path: Path, filter: PathFilter): Array[FileStatus]
/** List all the files in a path. */
def list(path: Path): Array[FileStatus] = {
list(path, (_: Path) => true)
}
/** Make directory at the given path and all its parent directories as needed. */
def mkdirs(path: Path): Unit
/** Whether path exists */
def exists(path: Path): Boolean
/** Recursively delete a path if it exists. Should not throw exception if file doesn't exist. */
def delete(path: Path): Unit
/** Is the default file system this implementation is operating on the local file system. */
def isLocal: Boolean
}
object CheckpointFileManager extends Logging {
/**
* Additional methods in CheckpointFileManager implementations that allows
* [[RenameBasedFSDataOutputStream]] get atomicity by write-to-temp-file-and-rename
*/
sealed trait RenameHelperMethods { self: CheckpointFileManager =>
/** Create a file with overwrite. */
def createTempFile(path: Path): FSDataOutputStream
/**
* Rename a file.
*
* @param srcPath Source path to rename
* @param dstPath Destination path to rename to
* @param overwriteIfPossible If true, then the implementations must do a best-effort attempt to
* overwrite the file if it already exists. It should not throw
* any exception if the file exists. However, if false, then the
* implementation must not overwrite if the file already exists and
* must throw `FileAlreadyExistsException` in that case.
*/
def renameTempFile(srcPath: Path, dstPath: Path, overwriteIfPossible: Boolean): Unit
}
/**
* An interface to add the cancel() operation to [[FSDataOutputStream]]. This is used
* mainly by `CheckpointFileManager.createAtomic` to write a file atomically.
*
* @see [[CheckpointFileManager]].
*/
abstract class CancellableFSDataOutputStream(protected val underlyingStream: OutputStream)
extends FSDataOutputStream(underlyingStream, null) {
/** Cancel the `underlyingStream` and ensure that the output file is not generated. */
def cancel(): Unit
}
/**
* An implementation of [[CancellableFSDataOutputStream]] that writes a file atomically by writing
* to a temporary file and then renames.
*/
sealed class RenameBasedFSDataOutputStream(
fm: CheckpointFileManager with RenameHelperMethods,
finalPath: Path,
tempPath: Path,
overwriteIfPossible: Boolean)
extends CancellableFSDataOutputStream(fm.createTempFile(tempPath)) {
def this(fm: CheckpointFileManager with RenameHelperMethods, path: Path, overwrite: Boolean) = {
this(fm, path, generateTempPath(path), overwrite)
}
logInfo(s"Writing atomically to $finalPath using temp file $tempPath")
@volatile private var terminated = false
override def close(): Unit = synchronized {
try {
if (terminated) return
underlyingStream.close()
try {
fm.renameTempFile(tempPath, finalPath, overwriteIfPossible)
} catch {
case fe: FileAlreadyExistsException =>
logWarning(
s"Failed to rename temp file $tempPath to $finalPath because file exists", fe)
if (!overwriteIfPossible) throw fe
}
logInfo(s"Renamed temp file $tempPath to $finalPath")
} finally {
terminated = true
}
}
override def cancel(): Unit = synchronized {
try {
if (terminated) return
underlyingStream.close()
fm.delete(tempPath)
} catch {
case NonFatal(e) =>
logWarning(s"Error cancelling write to $finalPath", e)
} finally {
terminated = true
}
}
}
/** Create an instance of [[CheckpointFileManager]] based on the path and configuration. */
def create(path: Path, hadoopConf: Configuration): CheckpointFileManager = {
val fileManagerClass = hadoopConf.get(
SQLConf.STREAMING_CHECKPOINT_FILE_MANAGER_CLASS.parent.key)
if (fileManagerClass != null) {
return Utils.classForName(fileManagerClass)
.getConstructor(classOf[Path], classOf[Configuration])
.newInstance(path, hadoopConf)
.asInstanceOf[CheckpointFileManager]
}
try {
// Try to create a manager based on `FileContext` because HDFS's `FileContext.rename()`
// gives atomic renames, which is what we rely on for the default implementation
// `CheckpointFileManager.createAtomic`.
new FileContextBasedCheckpointFileManager(path, hadoopConf)
} catch {
case e: UnsupportedFileSystemException =>
logWarning(
"Could not use FileContext API for managing Structured Streaming checkpoint files at " +
s"$path. Using FileSystem API instead for managing log files. If the implementation " +
s"of FileSystem.rename() is not atomic, then the correctness and fault-tolerance of" +
s"your Structured Streaming is not guaranteed.")
new FileSystemBasedCheckpointFileManager(path, hadoopConf)
}
}
private def generateTempPath(path: Path): Path = {
val tc = org.apache.spark.TaskContext.get
val tid = if (tc != null) ".TID" + tc.taskAttemptId else ""
new Path(path.getParent, s".${path.getName}.${UUID.randomUUID}${tid}.tmp")
}
}
/** An implementation of [[CheckpointFileManager]] using Hadoop's [[FileSystem]] API. */
class FileSystemBasedCheckpointFileManager(path: Path, hadoopConf: Configuration)
extends CheckpointFileManager with RenameHelperMethods with Logging {
import CheckpointFileManager._
protected val fs = path.getFileSystem(hadoopConf)
override def list(path: Path, filter: PathFilter): Array[FileStatus] = {
fs.listStatus(path, filter)
}
override def mkdirs(path: Path): Unit = {
fs.mkdirs(path, FsPermission.getDirDefault)
}
override def createTempFile(path: Path): FSDataOutputStream = {
fs.create(path, true)
}
override def createAtomic(
path: Path,
overwriteIfPossible: Boolean): CancellableFSDataOutputStream = {
new RenameBasedFSDataOutputStream(this, path, overwriteIfPossible)
}
override def open(path: Path): FSDataInputStream = {
fs.open(path)
}
override def exists(path: Path): Boolean =
try {
fs.getFileStatus(path) != null
} catch {
case _: FileNotFoundException => false
}
override def renameTempFile(srcPath: Path, dstPath: Path, overwriteIfPossible: Boolean): Unit = {
if (!overwriteIfPossible && fs.exists(dstPath)) {
throw new FileAlreadyExistsException(
s"Failed to rename $srcPath to $dstPath as destination already exists")
}
if (!fs.rename(srcPath, dstPath)) {
// FileSystem.rename() returning false is very ambiguous as it can be for many reasons.
// This tries to make a best effort attempt to return the most appropriate exception.
if (fs.exists(dstPath)) {
if (!overwriteIfPossible) {
throw new FileAlreadyExistsException(s"Failed to rename as $dstPath already exists")
}
} else if (!fs.exists(srcPath)) {
throw new FileNotFoundException(s"Failed to rename as $srcPath was not found")
} else {
val msg = s"Failed to rename temp file $srcPath to $dstPath as rename returned false"
logWarning(msg)
throw new IOException(msg)
}
}
}
override def delete(path: Path): Unit = {
try {
fs.delete(path, true)
} catch {
case e: FileNotFoundException =>
logInfo(s"Failed to delete $path as it does not exist")
// ignore if file has already been deleted
}
}
override def isLocal: Boolean = fs match {
case _: LocalFileSystem | _: RawLocalFileSystem => true
case _ => false
}
}
/** An implementation of [[CheckpointFileManager]] using Hadoop's [[FileContext]] API. */
class FileContextBasedCheckpointFileManager(path: Path, hadoopConf: Configuration)
extends CheckpointFileManager with RenameHelperMethods with Logging {
import CheckpointFileManager._
private val fc = if (path.toUri.getScheme == null) {
FileContext.getFileContext(hadoopConf)
} else {
FileContext.getFileContext(path.toUri, hadoopConf)
}
override def list(path: Path, filter: PathFilter): Array[FileStatus] = {
fc.util.listStatus(path, filter)
}
override def mkdirs(path: Path): Unit = {
fc.mkdir(path, FsPermission.getDirDefault, true)
}
override def createTempFile(path: Path): FSDataOutputStream = {
import CreateFlag._
import Options._
fc.create(
path, EnumSet.of(CREATE, OVERWRITE), CreateOpts.checksumParam(ChecksumOpt.createDisabled()))
}
override def createAtomic(
path: Path,
overwriteIfPossible: Boolean): CancellableFSDataOutputStream = {
new RenameBasedFSDataOutputStream(this, path, overwriteIfPossible)
}
override def open(path: Path): FSDataInputStream = {
fc.open(path)
}
override def exists(path: Path): Boolean = {
fc.util.exists(path)
}
override def renameTempFile(srcPath: Path, dstPath: Path, overwriteIfPossible: Boolean): Unit = {
import Options.Rename._
fc.rename(srcPath, dstPath, if (overwriteIfPossible) OVERWRITE else NONE)
// TODO: this is a workaround of HADOOP-16255 - remove this when HADOOP-16255 is resolved
mayRemoveCrcFile(srcPath)
}
override def delete(path: Path): Unit = {
try {
fc.delete(path, true)
} catch {
case e: FileNotFoundException =>
// ignore if file has already been deleted
}
}
override def isLocal: Boolean = fc.getDefaultFileSystem match {
case _: LocalFs | _: RawLocalFs => true // LocalFs = RawLocalFs + ChecksumFs
case _ => false
}
private def mayRemoveCrcFile(path: Path): Unit = {
try {
val checksumFile = new Path(path.getParent, s".${path.getName}.crc")
if (exists(checksumFile)) {
// checksum file exists, deleting it
delete(checksumFile)
}
} catch {
case NonFatal(_) => // ignore, we are removing crc file as "best-effort"
}
}
}
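// A minimal usage sketch (not part of the original file) of the atomic-write
// contract described on the CheckpointFileManager trait: obtain a manager with
// CheckpointFileManager.create, open the stream with createAtomic, then either
// close() to publish the file atomically or cancel() to discard it. The path and
// payload below are illustrative.
object CheckpointFileManagerSketch {
  def writeAtomically(checkpointDir: Path, hadoopConf: Configuration): Unit = {
    val fm = CheckpointFileManager.create(checkpointDir, hadoopConf)
    val out = fm.createAtomic(new Path(checkpointDir, "offsets/42"), overwriteIfPossible = false)
    try {
      out.write("checkpoint payload".getBytes("UTF-8"))
      out.close()  // publish: the complete file becomes visible atomically
    } catch {
      case NonFatal(e) =>
        out.cancel() // ensure no partial file is left behind
        throw e
    }
  }
}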
|
pgandhi999/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/CheckpointFileManager.scala
|
Scala
|
apache-2.0
| 13,503 |
package skychat
import java.util.UUID
import net.milkbowl.vault.chat.Chat
import org.bukkit.{Bukkit, OfflinePlayer}
import org.bukkit.command.{Command, CommandSender}
import org.bukkit.plugin.java.JavaPlugin
import skychat.irc.SkyChatIrc
class SkyChat extends JavaPlugin {
override def onEnable(): Unit = {
if (!getServer.getPluginManager.isPluginEnabled("Vault")) {
getLogger.severe("SkyChat disabled due to lack of vault!")
getServer.getPluginManager.disablePlugin(this)
return
}
SkyChatIrc.init(getConfig.getConfigurationSection("irc"))
SkyChat.inst = this
SkyChat.chat = getServer.getServicesManager.getRegistration(classOf[Chat]).getProvider
PlayerData.init()
}
def doMsg(player: OfflinePlayer, who: String, msg: String): Boolean = Option(Bukkit.getPlayer(who)) match {
case Some(p) =>
doMsgImpl(player, p.getUniqueId, msg)
true
case None =>
player.getPlayer.sendMessage("That player isn't online!")
false
}
def doMsgImpl(player: OfflinePlayer, id: UUID, msg: String): Unit = PlayerData.getPlayer(player).foreach { p =>
Option(getServer.getPlayer(id)) match {
case None => player.getPlayer.sendMessage("That player isn't online!")
case Some(tgt) =>
PlayerData.update(PlayerData.Player.update(tgt).copy(replyTarget = Some(player.getUniqueId)))
PlayerData.update(p.copy(replyTarget = Some(id)))
tgt.sendMessage(TextFormat.ingameMsgRx(Map(
"prefix" -> SkyChat.chat.getPlayerPrefix(player.getPlayer),
"name" -> player.getPlayer.getDisplayName,
"msg" -> msg
)))
player.getPlayer.sendMessage(TextFormat.ingameMsgEcho(Map(
"prefix" -> SkyChat.chat.getPlayerPrefix(tgt),
"name" -> tgt.getDisplayName,
"msg" -> msg
)))
}
}
def doReply(player: OfflinePlayer, msg: String): Boolean = PlayerData.getPlayer(player).exists(_.replyTarget match {
case Some(id) =>
doMsgImpl(player, id, msg)
true
case None =>
player.getPlayer.sendMessage("No reply target!")
false
})
override def onCommand(sender: CommandSender, command: Command, label: String, args: Array[String]): Boolean = command.getName match {
case "r" => doReply(sender.asInstanceOf[OfflinePlayer], args.mkString(" "))
case "msg" => doMsg(sender.asInstanceOf[OfflinePlayer], args.head, args.tail.mkString(" "))
case "me" => ActionHandler.handle(sender.asInstanceOf[OfflinePlayer], args.mkString(" "))
}
}
object SkyChat {
var chat: Chat = _
var inst: SkyChat = _
}
|
robotbrain/skychat
|
src/main/scala/skychat/SkyChat.scala
|
Scala
|
apache-2.0
| 2,593 |
package com.datastax.spark.connector.cql
import java.net.InetAddress
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Try
import scala.util.control.NonFatal
import org.apache.spark.{Logging, SparkConf}
import com.datastax.driver.core.{ProtocolOptions, SSLOptions}
import com.datastax.spark.connector.util.ConfigCheck
/** Stores configuration of a connection to Cassandra.
* Provides information about cluster nodes, ports and optional credentials for authentication. */
case class CassandraConnectorConf(
hosts: Set[InetAddress],
port: Int = CassandraConnectorConf.DefaultPort,
authConf: AuthConf = NoAuthConf,
localDC: Option[String] = None,
keepAliveMillis: Int = CassandraConnectorConf.DefaultKeepAliveMillis,
minReconnectionDelayMillis: Int = CassandraConnectorConf.DefaultMinReconnectionDelayMillis,
maxReconnectionDelayMillis: Int = CassandraConnectorConf.DefaultMaxReconnectionDelayMillis,
compression: ProtocolOptions.Compression = CassandraConnectorConf.DefaultCassandraConnectionCompression,
queryRetryCount: Int = CassandraConnectorConf.DefaultQueryRetryCount,
connectTimeoutMillis: Int = CassandraConnectorConf.DefaultConnectTimeoutMillis,
readTimeoutMillis: Int = CassandraConnectorConf.DefaultReadTimeoutMillis,
connectionFactory: CassandraConnectionFactory = DefaultConnectionFactory,
cassandraSSLConf: CassandraConnectorConf.CassandraSSLConf = CassandraConnectorConf.DefaultCassandraSSLConf,
queryRetryDelay: CassandraConnectorConf.RetryDelayConf = CassandraConnectorConf.DefaultQueryRetryDelay
)
/** A factory for [[CassandraConnectorConf]] objects.
* Allows for manually setting connection properties or reading them from [[org.apache.spark.SparkConf SparkConf]]
* object. By embedding connection information in [[org.apache.spark.SparkConf SparkConf]],
* [[org.apache.spark.SparkContext SparkContext]] can offer Cassandra specific methods which require establishing
* connections to a Cassandra cluster. */
object CassandraConnectorConf extends Logging {
case class CassandraSSLConf(
enabled: Boolean = false,
trustStorePath: Option[String] = None,
trustStorePassword: Option[String] = None,
trustStoreType: String = "JKS",
protocol: String = "TLS",
enabledAlgorithms: Array[String] = SSLOptions.DEFAULT_SSL_CIPHER_SUITES
)
trait RetryDelayConf {
def forRetry(retryNumber: Int): Duration
}
object RetryDelayConf extends Serializable {
case class ConstantDelay(delay: Duration) extends RetryDelayConf {
require(delay.length >= 0, "Delay must not be negative")
override def forRetry(nbRetry: Int) = delay
}
case class LinearDelay(initialDelay: Duration, increaseBy: Duration) extends RetryDelayConf {
require(initialDelay.length >= 0, "Initial delay must not be negative")
require(increaseBy.length > 0, "Delay increase must be greater than 0")
override def forRetry(nbRetry: Int) = initialDelay + (increaseBy * (nbRetry - 1).max(0))
}
case class ExponentialDelay(initialDelay: Duration, increaseBy: Double) extends RetryDelayConf {
require(initialDelay.length >= 0, "Initial delay must not be negative")
require(increaseBy > 0, "Delay increase must be greater than 0")
override def forRetry(nbRetry: Int) =
(initialDelay.toMillis * math.pow(increaseBy, (nbRetry - 1).max(0))).toLong milliseconds
}
private val ConstantDelayEx = """(\d+)""".r
private val LinearDelayEx = """(\d+)\+(.+)""".r
private val ExponentialDelayEx = """(\d+)\*(.+)""".r
def fromString(s: String): Option[RetryDelayConf] = s.trim match {
case "" => None
case ConstantDelayEx(delayStr) =>
val d = for (delay <- Try(delayStr.toInt)) yield ConstantDelay(delay milliseconds)
d.toOption.orElse(throw new IllegalArgumentException(
s"Invalid format of constant delay: $s; it should be <integer number>."))
case LinearDelayEx(delayStr, increaseStr) =>
val d = for (delay <- Try(delayStr.toInt); increaseBy <- Try(increaseStr.toInt))
yield LinearDelay(delay milliseconds, increaseBy milliseconds)
d.toOption.orElse(throw new IllegalArgumentException(
s"Invalid format of linearly increasing delay: $s; it should be <integer number>+<integer number>"))
case ExponentialDelayEx(delayStr, increaseStr) =>
val d = for (delay <- Try(delayStr.toInt); increaseBy <- Try(increaseStr.toDouble))
yield ExponentialDelay(delay milliseconds, increaseBy)
d.toOption.orElse(throw new IllegalArgumentException(
s"Invalid format of exponentially increasing delay: $s; it should be <integer number>*<real number>"))
}
}
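// Illustrative parses for the delay strings accepted above (example values only):
//   RetryDelayConf.fromString("1000")     -> Some(ConstantDelay(1000 milliseconds))
//   RetryDelayConf.fromString("1000+500") -> Some(LinearDelay(1000 milliseconds, 500 milliseconds))
//   RetryDelayConf.fromString("1000*2")   -> Some(ExponentialDelay(1000 milliseconds, 2.0))
//   RetryDelayConf.fromString("")         -> None; any other shape falls through the match and throws a MatchError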
val DefaultPort = 9042
val DefaultKeepAliveMillis = 250
val DefaultMinReconnectionDelayMillis = 1000
val DefaultMaxReconnectionDelayMillis = 60000
val DefaultQueryRetryCount = 10
val DefaultQueryRetryDelay = RetryDelayConf.ExponentialDelay(4 seconds, 1.5d)
val DefaultConnectTimeoutMillis = 5000
val DefaultReadTimeoutMillis = 120000
val DefaultCassandraConnectionCompression = ProtocolOptions.Compression.NONE
val DefaultCassandraSSLConf = CassandraSSLConf()
val CassandraConnectionHostProperty = "spark.cassandra.connection.host"
val CassandraConnectionPortProperty = "spark.cassandra.connection.port"
val CassandraConnectionLocalDCProperty = "spark.cassandra.connection.local_dc"
val CassandraConnectionTimeoutProperty = "spark.cassandra.connection.timeout_ms"
val CassandraConnectionKeepAliveProperty = "spark.cassandra.connection.keep_alive_ms"
val CassandraMinReconnectionDelayProperty = "spark.cassandra.connection.reconnection_delay_ms.min"
val CassandraMaxReconnectionDelayProperty = "spark.cassandra.connection.reconnection_delay_ms.max"
val CassandraConnectionCompressionProperty = "spark.cassandra.connection.compression"
val CassandraQueryRetryCountProperty = "spark.cassandra.query.retry.count"
val CassandraQueryRetryDelayProperty = "spark.cassandra.query.retry.delay"
val CassandraReadTimeoutProperty = "spark.cassandra.read.timeout_ms"
val CassandraConnectionSSLEnabledProperty = "spark.cassandra.connection.ssl.enabled"
val CassandraConnectionSSLTrustStorePathProperty = "spark.cassandra.connection.ssl.trustStore.path"
val CassandraConnectionSSLTrustStorePasswordProperty = "spark.cassandra.connection.ssl.trustStore.password"
val CassandraConnectionSSLTrustStoreTypeProperty = "spark.cassandra.connection.ssl.trustStore.type"
val CassandraConnectionSSLProtocolProperty = "spark.cassandra.connection.ssl.protocol"
val CassandraConnectionSSLEnabledAlgorithmsProperty = "spark.cassandra.connection.ssl.enabledAlgorithms"
// Whitelist of allowed CassandraConnector configuration properties (SparkConf keys)
val Properties = Set(
CassandraConnectionHostProperty,
CassandraConnectionPortProperty,
CassandraConnectionLocalDCProperty,
CassandraConnectionTimeoutProperty,
CassandraConnectionKeepAliveProperty,
CassandraMinReconnectionDelayProperty,
CassandraMaxReconnectionDelayProperty,
CassandraConnectionCompressionProperty,
CassandraQueryRetryCountProperty,
CassandraQueryRetryDelayProperty,
CassandraReadTimeoutProperty,
CassandraConnectionSSLEnabledProperty,
CassandraConnectionSSLTrustStorePathProperty,
CassandraConnectionSSLTrustStorePasswordProperty,
CassandraConnectionSSLTrustStoreTypeProperty,
CassandraConnectionSSLProtocolProperty,
CassandraConnectionSSLEnabledAlgorithmsProperty
)
private def resolveHost(hostName: String): Option[InetAddress] = {
try Some(InetAddress.getByName(hostName))
catch {
case NonFatal(e) =>
logError(s"Unknown host '$hostName'", e)
None
}
}
def apply(conf: SparkConf): CassandraConnectorConf = {
ConfigCheck.checkConfig(conf)
val hostsStr = conf.get(CassandraConnectionHostProperty, InetAddress.getLocalHost.getHostAddress)
val hosts = for {
hostName <- hostsStr.split(",").toSet[String]
hostAddress <- resolveHost(hostName.trim)
} yield hostAddress
val port = conf.getInt(CassandraConnectionPortProperty, DefaultPort)
val authConf = AuthConf.fromSparkConf(conf)
val keepAlive = conf.getInt(CassandraConnectionKeepAliveProperty, DefaultKeepAliveMillis)
val localDC = conf.getOption(CassandraConnectionLocalDCProperty)
val minReconnectionDelay = conf.getInt(CassandraMinReconnectionDelayProperty, DefaultMinReconnectionDelayMillis)
val maxReconnectionDelay = conf.getInt(CassandraMaxReconnectionDelayProperty, DefaultMaxReconnectionDelayMillis)
val queryRetryCount = conf.getInt(CassandraQueryRetryCountProperty, DefaultQueryRetryCount)
val queryRetryDelay = RetryDelayConf.fromString(conf.get(CassandraQueryRetryDelayProperty, ""))
.getOrElse(DefaultQueryRetryDelay)
val connectTimeout = conf.getInt(CassandraConnectionTimeoutProperty, DefaultConnectTimeoutMillis)
val readTimeout = conf.getInt(CassandraReadTimeoutProperty, DefaultReadTimeoutMillis)
val compression = conf.getOption(CassandraConnectionCompressionProperty)
.map(ProtocolOptions.Compression.valueOf).getOrElse(DefaultCassandraConnectionCompression)
val connectionFactory = CassandraConnectionFactory.fromSparkConf(conf)
val sslEnabled = conf.getBoolean(CassandraConnectionSSLEnabledProperty,
defaultValue = DefaultCassandraSSLConf.enabled)
val sslTrustStorePath = conf.getOption(CassandraConnectionSSLTrustStorePathProperty)
val sslTrustStorePassword = conf.getOption(CassandraConnectionSSLTrustStorePasswordProperty)
val sslTrustStoreType = conf.get(CassandraConnectionSSLTrustStoreTypeProperty,
defaultValue = DefaultCassandraSSLConf.trustStoreType)
val sslProtocol = conf.get(CassandraConnectionSSLProtocolProperty,
defaultValue = DefaultCassandraSSLConf.protocol)
val sslEnabledAlgorithms = conf.getOption(CassandraConnectionSSLEnabledAlgorithmsProperty)
.map(_.split(",").map(_.trim)).getOrElse(DefaultCassandraSSLConf.enabledAlgorithms)
val cassandraSSLConf = CassandraSSLConf(
enabled = sslEnabled,
trustStorePath = sslTrustStorePath,
trustStorePassword = sslTrustStorePassword,
trustStoreType = sslTrustStoreType,
protocol = sslProtocol,
enabledAlgorithms = sslEnabledAlgorithms
)
CassandraConnectorConf(
hosts = hosts,
port = port,
authConf = authConf,
localDC = localDC,
keepAliveMillis = keepAlive,
minReconnectionDelayMillis = minReconnectionDelay,
maxReconnectionDelayMillis = maxReconnectionDelay,
compression = compression,
queryRetryCount = queryRetryCount,
connectTimeoutMillis = connectTimeout,
readTimeoutMillis = readTimeout,
connectionFactory = connectionFactory,
cassandraSSLConf = cassandraSSLConf,
queryRetryDelay = queryRetryDelay
)
}
}
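// A hypothetical, minimal usage sketch assuming only the SparkConf setters shown; the host, port and
// retry values below are placeholders rather than recommendations.
object CassandraConnectorConfUsageSketch {
  def fromSparkConf(): CassandraConnectorConf = {
    val conf = new SparkConf()
      .set(CassandraConnectorConf.CassandraConnectionHostProperty, "127.0.0.1")
      .set(CassandraConnectorConf.CassandraConnectionPortProperty, "9042")
      .set(CassandraConnectorConf.CassandraQueryRetryDelayProperty, "1000*2") // exponential: 1s, 2s, 4s, ...
    CassandraConnectorConf(conf) // anything not set falls back to the defaults declared above
  }
}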
|
EchoSYSU/spark-cassandra-connector
|
spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/cql/CassandraConnectorConf.scala
|
Scala
|
apache-2.0
| 10,964 |
package antonkulaga.projects.templates
import scalacss.Defaults._
object MyStyles extends StyleSheet.Standalone {
/* "#container" - (
width(1024 px),
height(768 px)
)
*/
}
|
antonkulaga/personal
|
app/jvm/src/main/scala/antonkulaga/projects/templates/MyStyles.scala
|
Scala
|
mpl-2.0
| 187 |
package com.viaden.crm.spark.experiments
import java.util.Date
import org.joda.time.format.DateTimeFormat
/**
* Created by remeniuk on 28.12.14.
*/
object Convertions {
private val MillisInDay = 86400000
// Joda-Time pattern: 'yyyy' is the year and 'dd' the day of month; uppercase 'DD' would parse day of year.
val dateFormat = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss")
def dateToTimestamp(date: String) = dateFormat.parseDateTime(date).getMillis / MillisInDay
def timestampToDate(timestamp: Long) = new Date(timestamp * MillisInDay)
abstract class NullableDatabaseValue[T](value: String, parse: String => T) {
def fromString: T = if (value == "NULL") parse("0") else parse(value)
}
implicit class NullableString(value: String) {
def toStringOrEmpty = if (value == "NULL") "Unknown" else value
}
implicit class NullableInt(value: String) extends NullableDatabaseValue[Int](value, _.toInt) {
def toIntOrEmpty = fromString
}
implicit class NullableDouble(value: String) extends NullableDatabaseValue[Double](value, _.toDouble) {
def toDoubleOrEmpty = fromString
}
implicit class NullableLong(value: String) extends NullableDatabaseValue[Long](value, _.toLong) {
def toLongOrEmpty = fromString
}
}
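// Minimal usage sketch (illustrative values, assuming `import Convertions._` at the call site):
//   "NULL".toIntOrEmpty                     // -> 0, since "NULL" falls back to the parsed "0"
//   "42".toIntOrEmpty                       // -> 42
//   "NULL".toStringOrEmpty                  // -> "Unknown"
//   dateToTimestamp("2014-12-28 00:00:00")  // -> whole days since the epoch (millis / 86400000)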
|
remeniuk/spark-experiments
|
src/main/scala/com/viaden/crm/spark/experiments/Convertions.scala
|
Scala
|
mit
| 1,163 |
package com.twitter.finagle.mux.transport
import com.twitter.finagle.{Failure, FailureFlags}
import com.twitter.finagle.util.BufWriter
import com.twitter.io.Buf
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class MuxFailureTest extends FunSuite {
class FlaggedClass(val flags: Long) extends FailureFlags[FlaggedClass] {
protected def copyWithFlags(f: Long): FlaggedClass = ???
}
test("Flag values") {
assert(MuxFailure.Retryable == 1L << 0)
assert(MuxFailure.Rejected == 1L << 1)
assert(MuxFailure.NonRetryable == 1L << 2)
}
test("convert flags with c.t.f.FailureFlags") {
val flagTests = Seq(
(FailureFlags.Retryable|FailureFlags.Rejected, MuxFailure.Retryable|MuxFailure.Rejected),
(FailureFlags.NonRetryable, MuxFailure.NonRetryable),
(0L, 0L)
)
flagTests.foreach {
case (finagle, mux) =>
assert(MuxFailure(mux).finagleFlags == finagle)
assert(MuxFailure.fromThrow(Failure(":(", finagle)).flags == mux)
assert(MuxFailure.fromThrow(new FlaggedClass(finagle)).flags == mux)
}
}
test("Convert to & from context pairs") {
val muxFail = MuxFailure(MuxFailure.NonRetryable)
val expectedContext = Seq(
(Buf.Utf8("MuxFailure"), BufWriter.fixed(8).writeLongBE(MuxFailure.NonRetryable).owned())
)
assert(muxFail.contexts.equals(expectedContext))
// Round trip
assert(MuxFailure.fromContexts(muxFail.contexts) == Some(muxFail))
// Special case - No relevant info, so no need to pass context.
assert(MuxFailure.Empty.contexts == Nil)
}
}
|
spockz/finagle
|
finagle-mux/src/test/scala/com/twitter/finagle/mux/transport/MuxFailureTest.scala
|
Scala
|
apache-2.0
| 1,659 |
object Test:
@annotation.tailrec
def sum(n: Int, acc: Int = 0): Int =
if n != 0 then return sum(n - 1, acc + n)
acc
@annotation.tailrec
def isEven(n: Int): Boolean =
if n != 0 && n != 1 then return isEven(n - 2)
if n == 1 then return false
true
@annotation.tailrec
def isEvenApply(n: Int): Boolean =
// Return inside an `Apply.fun`
(
if n != 0 && n != 1 then return isEvenApply(n - 2)
else if n == 1 then return false
else (x: Boolean) => x
)(true)
@annotation.tailrec
def isEvenWhile(n: Int): Boolean =
// Return inside a `WhileDo.cond`
while(
if n != 0 && n != 1 then return isEvenWhile(n - 2)
else if n == 1 then return false
else true
) {}
true
@annotation.tailrec
def isEvenReturn(n: Int): Boolean =
// Return inside a `Return`
return
if n != 0 && n != 1 then return isEvenReturn(n - 2)
else if n == 1 then return false
else true
@annotation.tailrec
def names(l: List[(String, String) | Null], acc: List[String] = Nil): List[String] =
l match
case Nil => acc.reverse
case x :: xs =>
if x == null then return names(xs, acc)
val displayName = x._1 + " " + x._2
names(xs, displayName :: acc)
def nonTail(l: List[Int]): List[Int] =
l match
case Nil => Nil
case x :: xs =>
// The call to nonTail should *not* be eliminated
(x + 1) :: nonTail(xs)
def main(args: Array[String]): Unit =
println(sum(3))
println(isEven(5))
println(isEvenApply(6))
println(isEvenWhile(7))
println(isEvenReturn(8))
println(names(List(("Ada", "Lovelace"), null, ("Alan", "Turing"))).mkString(", "))
println(nonTail(List(8, 9)))
|
dotty-staging/dotty
|
tests/run/tailrec-return.scala
|
Scala
|
apache-2.0
| 1,753 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.util.UUID
import scala.util.Random
import org.scalatest.BeforeAndAfter
import org.apache.spark.scheduler.ExecutorCacheTaskLocation
import org.apache.spark.sql.{AnalysisException, DataFrame, Row, SparkSession}
import org.apache.spark.sql.catalyst.analysis.StreamingJoinHelper
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, AttributeSet, Literal}
import org.apache.spark.sql.catalyst.plans.logical.{EventTimeWatermark, Filter}
import org.apache.spark.sql.catalyst.trees.TreeNode
import org.apache.spark.sql.execution.{FileSourceScanExec, LogicalRDD}
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.execution.streaming.{MemoryStream, StatefulOperatorStateInfo, StreamingSymmetricHashJoinHelper}
import org.apache.spark.sql.execution.streaming.state.{StateStore, StateStoreProviderId}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
class StreamingInnerJoinSuite extends StreamTest with StateStoreMetricsTest with BeforeAndAfter {
before {
SparkSession.setActiveSession(spark) // set this before force initializing 'joinExec'
spark.streams.stateStoreCoordinator // initialize the lazy coordinator
}
after {
StateStore.stop()
}
import testImplicits._
test("stream stream inner join on non-time column") {
val input1 = MemoryStream[Int]
val input2 = MemoryStream[Int]
val df1 = input1.toDF.select('value as "key", ('value * 2) as "leftValue")
val df2 = input2.toDF.select('value as "key", ('value * 3) as "rightValue")
val joined = df1.join(df2, "key")
testStream(joined)(
AddData(input1, 1),
CheckAnswer(),
AddData(input2, 1, 10), // 1 arrived on input1 first, then input2, should join
CheckNewAnswer((1, 2, 3)),
AddData(input1, 10), // 10 arrived on input2 first, then input1, should join
CheckNewAnswer((10, 20, 30)),
AddData(input2, 1), // another 1 in input2 should join with 1 input1
CheckNewAnswer((1, 2, 3)),
StopStream,
StartStream(),
AddData(input1, 1), // multiple 1s should be kept in state causing multiple (1, 2, 3)
CheckNewAnswer((1, 2, 3), (1, 2, 3)),
StopStream,
StartStream(),
AddData(input1, 100),
AddData(input2, 100),
CheckNewAnswer((100, 200, 300))
)
}
test("stream stream inner join on windows - without watermark") {
val input1 = MemoryStream[Int]
val input2 = MemoryStream[Int]
val df1 = input1.toDF
.select('value as "key", 'value.cast("timestamp") as "timestamp", ('value * 2) as "leftValue")
.select('key, window('timestamp, "10 second"), 'leftValue)
val df2 = input2.toDF
.select('value as "key", 'value.cast("timestamp") as "timestamp",
('value * 3) as "rightValue")
.select('key, window('timestamp, "10 second"), 'rightValue)
val joined = df1.join(df2, Seq("key", "window"))
.select('key, $"window.end".cast("long"), 'leftValue, 'rightValue)
testStream(joined)(
AddData(input1, 1),
CheckNewAnswer(),
AddData(input2, 1),
CheckNewAnswer((1, 10, 2, 3)),
StopStream,
StartStream(),
AddData(input1, 25),
CheckNewAnswer(),
StopStream,
StartStream(),
AddData(input2, 25),
CheckNewAnswer((25, 30, 50, 75)),
AddData(input1, 1),
CheckNewAnswer((1, 10, 2, 3)), // State for 1 still around as there is no watermark
StopStream,
StartStream(),
AddData(input1, 5),
CheckNewAnswer(),
AddData(input2, 5),
CheckNewAnswer((5, 10, 10, 15)) // No filter by any watermark
)
}
test("stream stream inner join on windows - with watermark") {
val input1 = MemoryStream[Int]
val input2 = MemoryStream[Int]
val df1 = input1.toDF
.select('value as "key", 'value.cast("timestamp") as "timestamp", ('value * 2) as "leftValue")
.withWatermark("timestamp", "10 seconds")
.select('key, window('timestamp, "10 second"), 'leftValue)
val df2 = input2.toDF
.select('value as "key", 'value.cast("timestamp") as "timestamp",
('value * 3) as "rightValue")
.select('key, window('timestamp, "10 second"), 'rightValue)
val joined = df1.join(df2, Seq("key", "window"))
.select('key, $"window.end".cast("long"), 'leftValue, 'rightValue)
testStream(joined)(
AddData(input1, 1),
CheckAnswer(),
assertNumStateRows(total = 1, updated = 1),
AddData(input2, 1),
CheckAnswer((1, 10, 2, 3)),
assertNumStateRows(total = 2, updated = 1),
StopStream,
StartStream(),
AddData(input1, 25),
CheckNewAnswer(), // watermark = 15, no-data-batch should remove 2 rows having window=[0,10]
assertNumStateRows(total = 1, updated = 1),
AddData(input2, 25),
CheckNewAnswer((25, 30, 50, 75)),
assertNumStateRows(total = 2, updated = 1),
StopStream,
StartStream(),
AddData(input2, 1),
CheckNewAnswer(), // Should not join as < 15 removed
assertNumStateRows(total = 2, updated = 0), // row not add as 1 < state key watermark = 15
AddData(input1, 5),
CheckNewAnswer(), // Same reason as above
assertNumStateRows(total = 2, updated = 0)
)
}
test("stream stream inner join with time range - with watermark - one side condition") {
import org.apache.spark.sql.functions._
val leftInput = MemoryStream[(Int, Int)]
val rightInput = MemoryStream[(Int, Int)]
val df1 = leftInput.toDF.toDF("leftKey", "time")
.select('leftKey, 'time.cast("timestamp") as "leftTime", ('leftKey * 2) as "leftValue")
.withWatermark("leftTime", "10 seconds")
val df2 = rightInput.toDF.toDF("rightKey", "time")
.select('rightKey, 'time.cast("timestamp") as "rightTime", ('rightKey * 3) as "rightValue")
.withWatermark("rightTime", "10 seconds")
val joined =
df1.join(df2, expr("leftKey = rightKey AND leftTime < rightTime - interval 5 seconds"))
.select('leftKey, 'leftTime.cast("int"), 'rightTime.cast("int"))
testStream(joined)(
AddData(leftInput, (1, 5)),
CheckAnswer(),
AddData(rightInput, (1, 11)),
CheckNewAnswer((1, 5, 11)),
AddData(rightInput, (1, 10)),
CheckNewAnswer(), // no match as leftTime 5 is not < rightTime 10 - 5
assertNumStateRows(total = 3, updated = 3),
// Increase event time watermark to 20s by adding data with time = 30s on both inputs
AddData(leftInput, (1, 3), (1, 30)),
CheckNewAnswer((1, 3, 10), (1, 3, 11)),
assertNumStateRows(total = 5, updated = 2),
AddData(rightInput, (0, 30)),
CheckNewAnswer(),
// event time watermark: max event time - 10 ==> 30 - 10 = 20
// so left side going to only receive data where leftTime > 20
// right side state constraint: 20 < leftTime < rightTime - 5 ==> rightTime > 25
// right state where rightTime <= 25 will be cleared, (1, 11) and (1, 10) removed
assertNumStateRows(total = 4, updated = 1),
// New data to right input should match with left side (1, 3) and (1, 5), as left state should
// not be cleared. But rows rightTime <= 20 should be filtered due to event time watermark and
// state rows with rightTime <= 25 should be removed from state.
// (1, 20) ==> filtered by event time watermark = 20
// (1, 21) ==> passed filter, matched with left (1, 3) and (1, 5), not added to state
// as 21 < state watermark = 25
// (1, 28) ==> passed filter, matched with left (1, 3) and (1, 5), added to state
AddData(rightInput, (1, 20), (1, 21), (1, 28)),
CheckNewAnswer((1, 3, 21), (1, 5, 21), (1, 3, 28), (1, 5, 28)),
assertNumStateRows(total = 5, updated = 1),
// New data to left input with leftTime <= 20 should be filtered due to event time watermark
AddData(leftInput, (1, 20), (1, 21)),
CheckNewAnswer((1, 21, 28)),
assertNumStateRows(total = 6, updated = 1)
)
}
test("stream stream inner join with time range - with watermark - two side conditions") {
import org.apache.spark.sql.functions._
val leftInput = MemoryStream[(Int, Int)]
val rightInput = MemoryStream[(Int, Int)]
val df1 = leftInput.toDF.toDF("leftKey", "time")
.select('leftKey, 'time.cast("timestamp") as "leftTime", ('leftKey * 2) as "leftValue")
.withWatermark("leftTime", "20 seconds")
val df2 = rightInput.toDF.toDF("rightKey", "time")
.select('rightKey, 'time.cast("timestamp") as "rightTime", ('rightKey * 3) as "rightValue")
.withWatermark("rightTime", "30 seconds")
val condition = expr(
"leftKey = rightKey AND " +
"leftTime BETWEEN rightTime - interval 10 seconds AND rightTime + interval 5 seconds")
// This translates to leftTime <= rightTime + 5 seconds AND leftTime >= rightTime - 10 seconds
// So given leftTime, rightTime has to be BETWEEN leftTime - 5 seconds AND leftTime + 10 seconds
//
// =============== * ======================== * ============================== * ==> leftTime
// | | |
// |<---- 5s -->|<------ 10s ------>| |<------ 10s ------>|<---- 5s -->|
// | | |
// == * ============================== * =========>============== * ===============> rightTime
//
// E.g.
// if rightTime = 60, then it matches only leftTime = [50, 65]
// if leftTime = 20, then it match only with rightTime = [15, 30]
//
// State value predicates
// left side:
// values allowed: leftTime >= rightTime - 10s ==> leftTime > eventTimeWatermark - 10
// drop state where leftTime < eventTime - 10
// right side:
// values allowed: rightTime >= leftTime - 5s ==> rightTime > eventTimeWatermark - 5
// drop state where rightTime < eventTime - 5
val joined =
df1.join(df2, condition).select('leftKey, 'leftTime.cast("int"), 'rightTime.cast("int"))
testStream(joined)(
// If leftTime = 20, then it match only with rightTime = [15, 30]
AddData(leftInput, (1, 20)),
CheckAnswer(),
AddData(rightInput, (1, 14), (1, 15), (1, 25), (1, 26), (1, 30), (1, 31)),
CheckNewAnswer((1, 20, 15), (1, 20, 25), (1, 20, 26), (1, 20, 30)),
assertNumStateRows(total = 7, updated = 7),
// If rightTime = 60, then it matches only leftTime = [50, 65]
AddData(rightInput, (1, 60)),
CheckNewAnswer(), // matches with nothing on the left
AddData(leftInput, (1, 49), (1, 50), (1, 65), (1, 66)),
CheckNewAnswer((1, 50, 60), (1, 65, 60)),
// Event time watermark = min(left: 66 - delay 20 = 46, right: 60 - delay 30 = 30) = 30
// Left state value watermark = 30 - 10 = slightly less than 20 (since condition has <=)
// Should drop < 20 from left, i.e., none
// Right state value watermark = 30 - 5 = slightly less than 25 (since condition has <=)
// Should drop < 25 from the right, i.e., 14 and 15
assertNumStateRows(total = 10, updated = 5), // 12 - 2 removed
AddData(leftInput, (1, 30), (1, 31)), // 30 should not be processed or added to state
CheckNewAnswer((1, 31, 26), (1, 31, 30), (1, 31, 31)),
assertNumStateRows(total = 11, updated = 1), // only 31 added
// Advance the watermark
AddData(rightInput, (1, 80)),
CheckNewAnswer(),
// Event time watermark = min(left: 66 - delay 20 = 46, right: 80 - delay 30 = 50) = 46
// Left state value watermark = 46 - 10 = slightly less than 36 (since condition has <=)
// Should drop < 36 from left, i.e., 20, 31 (30 was not added)
// Right state value watermark = 46 - 5 = slightly less than 41 (since condition has <=)
// Should drop < 41 from the right, i.e., 25, 26, 30, 31
assertNumStateRows(total = 6, updated = 1), // 12 - 6 removed
AddData(rightInput, (1, 46), (1, 50)), // 46 should not be processed or added to state
CheckNewAnswer((1, 49, 50), (1, 50, 50)),
assertNumStateRows(total = 7, updated = 1) // 50 added
)
}
testQuietly("stream stream inner join without equality predicate") {
val input1 = MemoryStream[Int]
val input2 = MemoryStream[Int]
val df1 = input1.toDF.select('value as "leftKey", ('value * 2) as "leftValue")
val df2 = input2.toDF.select('value as "rightKey", ('value * 3) as "rightValue")
val joined = df1.join(df2, expr("leftKey < rightKey"))
val e = intercept[Exception] {
val q = joined.writeStream.format("memory").queryName("test").start()
input1.addData(1)
q.awaitTermination(10000)
}
assert(e.toString.contains("Stream-stream join without equality predicate is not supported"))
}
test("stream stream self join") {
val input = MemoryStream[Int]
val df = input.toDF
val join =
df.select('value % 5 as "key", 'value).join(
df.select('value % 5 as "key", 'value), "key")
testStream(join)(
AddData(input, 1, 2),
CheckAnswer((1, 1, 1), (2, 2, 2)),
StopStream,
StartStream(),
AddData(input, 3, 6),
/*
(1, 1) (1, 1)
(2, 2) x (2, 2) = (1, 1, 1), (1, 1, 6), (2, 2, 2), (1, 6, 1), (1, 6, 6)
(1, 6) (1, 6)
*/
CheckAnswer((3, 3, 3), (1, 1, 1), (1, 1, 6), (2, 2, 2), (1, 6, 1), (1, 6, 6)))
}
test("locality preferences of StateStoreAwareZippedRDD") {
import StreamingSymmetricHashJoinHelper._
withTempDir { tempDir =>
val queryId = UUID.randomUUID
val opId = 0
val path = Utils.createDirectory(tempDir.getAbsolutePath, Random.nextFloat.toString).toString
val stateInfo = StatefulOperatorStateInfo(path, queryId, opId, 0L, 5)
implicit val sqlContext = spark.sqlContext
val coordinatorRef = sqlContext.streams.stateStoreCoordinator
val numPartitions = 5
val storeNames = Seq("name1", "name2")
val partitionAndStoreNameToLocation = {
for (partIndex <- 0 until numPartitions; storeName <- storeNames) yield {
(partIndex, storeName) -> s"host-$partIndex-$storeName"
}
}.toMap
partitionAndStoreNameToLocation.foreach { case ((partIndex, storeName), hostName) =>
val providerId = StateStoreProviderId(stateInfo, partIndex, storeName)
coordinatorRef.reportActiveInstance(providerId, hostName, s"exec-$hostName")
require(
coordinatorRef.getLocation(providerId) ===
Some(ExecutorCacheTaskLocation(hostName, s"exec-$hostName").toString))
}
val rdd1 = spark.sparkContext.makeRDD(1 to 10, numPartitions)
val rdd2 = spark.sparkContext.makeRDD((1 to 10).map(_.toString), numPartitions)
val rdd = rdd1.stateStoreAwareZipPartitions(rdd2, stateInfo, storeNames, coordinatorRef) {
(left, right) => left.zip(right)
}
require(rdd.partitions.length === numPartitions)
for (partIndex <- 0 until numPartitions) {
val expectedLocations = storeNames.map { storeName =>
val hostName = partitionAndStoreNameToLocation((partIndex, storeName))
ExecutorCacheTaskLocation(hostName, s"exec-$hostName").toString
}.toSet
assert(rdd.preferredLocations(rdd.partitions(partIndex)).toSet === expectedLocations)
}
}
}
test("join between three streams") {
val input1 = MemoryStream[Int]
val input2 = MemoryStream[Int]
val input3 = MemoryStream[Int]
val df1 = input1.toDF.select('value as "leftKey", ('value * 2) as "leftValue")
val df2 = input2.toDF.select('value as "middleKey", ('value * 3) as "middleValue")
val df3 = input3.toDF.select('value as "rightKey", ('value * 5) as "rightValue")
val joined = df1.join(df2, expr("leftKey = middleKey")).join(df3, expr("rightKey = middleKey"))
testStream(joined)(
AddData(input1, 1, 5),
AddData(input2, 1, 5, 10),
AddData(input3, 5, 10),
CheckNewAnswer((5, 10, 5, 15, 5, 25)))
}
test("streaming join should require HashClusteredDistribution from children") {
val input1 = MemoryStream[Int]
val input2 = MemoryStream[Int]
val df1 = input1.toDF.select('value as 'a, 'value * 2 as 'b)
val df2 = input2.toDF.select('value as 'a, 'value * 2 as 'b).repartition('b)
val joined = df1.join(df2, Seq("a", "b")).select('a)
testStream(joined)(
AddData(input1, 1.to(1000): _*),
AddData(input2, 1.to(1000): _*),
CheckAnswer(1.to(1000): _*))
}
}
class StreamingOuterJoinSuite extends StreamTest with StateStoreMetricsTest with BeforeAndAfter {
import testImplicits._
import org.apache.spark.sql.functions._
before {
SparkSession.setActiveSession(spark) // set this before force initializing 'joinExec'
spark.streams.stateStoreCoordinator // initialize the lazy coordinator
}
after {
StateStore.stop()
}
private def setupStream(prefix: String, multiplier: Int): (MemoryStream[Int], DataFrame) = {
val input = MemoryStream[Int]
val df = input.toDF
.select(
'value as "key",
'value.cast("timestamp") as s"${prefix}Time",
('value * multiplier) as s"${prefix}Value")
.withWatermark(s"${prefix}Time", "10 seconds")
return (input, df)
}
private def setupWindowedJoin(joinType: String):
(MemoryStream[Int], MemoryStream[Int], DataFrame) = {
val (input1, df1) = setupStream("left", 2)
val (input2, df2) = setupStream("right", 3)
val windowed1 = df1.select('key, window('leftTime, "10 second"), 'leftValue)
val windowed2 = df2.select('key, window('rightTime, "10 second"), 'rightValue)
val joined = windowed1.join(windowed2, Seq("key", "window"), joinType)
.select('key, $"window.end".cast("long"), 'leftValue, 'rightValue)
(input1, input2, joined)
}
test("left outer early state exclusion on left") {
val (leftInput, df1) = setupStream("left", 2)
val (rightInput, df2) = setupStream("right", 3)
// Use different schemas to ensure the null row is being generated from the correct side.
val left = df1.select('key, window('leftTime, "10 second"), 'leftValue)
val right = df2.select('key, window('rightTime, "10 second"), 'rightValue.cast("string"))
val joined = left.join(
right,
left("key") === right("key")
&& left("window") === right("window")
&& 'leftValue > 4,
"left_outer")
.select(left("key"), left("window.end").cast("long"), 'leftValue, 'rightValue)
testStream(joined)(
MultiAddData(leftInput, 1, 2, 3)(rightInput, 3, 4, 5),
// The left rows with leftValue <= 4 should generate their outer join row now and
// not get added to the state.
CheckNewAnswer(Row(3, 10, 6, "9"), Row(1, 10, 2, null), Row(2, 10, 4, null)),
assertNumStateRows(total = 4, updated = 4),
// We shouldn't get more outer join rows when the watermark advances.
MultiAddData(leftInput, 20)(rightInput, 21),
CheckNewAnswer(),
AddData(rightInput, 20),
CheckNewAnswer((20, 30, 40, "60"))
)
}
test("left outer early state exclusion on right") {
val (leftInput, df1) = setupStream("left", 2)
val (rightInput, df2) = setupStream("right", 3)
// Use different schemas to ensure the null row is being generated from the correct side.
val left = df1.select('key, window('leftTime, "10 second"), 'leftValue)
val right = df2.select('key, window('rightTime, "10 second"), 'rightValue.cast("string"))
val joined = left.join(
right,
left("key") === right("key")
&& left("window") === right("window")
&& 'rightValue.cast("int") > 7,
"left_outer")
.select(left("key"), left("window.end").cast("long"), 'leftValue, 'rightValue)
testStream(joined)(
MultiAddData(leftInput, 3, 4, 5)(rightInput, 1, 2, 3),
// The right rows with rightValue <= 7 should never be added to the state.
CheckNewAnswer(Row(3, 10, 6, "9")), // rightValue = 9 > 7 hence joined and added to state
assertNumStateRows(total = 4, updated = 4),
// When the watermark advances, we get the outer join rows just as we would if they
// were added but didn't match the full join condition.
MultiAddData(leftInput, 20)(rightInput, 21), // watermark = 10, no-data-batch computes nulls
CheckNewAnswer(Row(4, 10, 8, null), Row(5, 10, 10, null)),
AddData(rightInput, 20),
CheckNewAnswer(Row(20, 30, 40, "60"))
)
}
test("right outer early state exclusion on left") {
val (leftInput, df1) = setupStream("left", 2)
val (rightInput, df2) = setupStream("right", 3)
// Use different schemas to ensure the null row is being generated from the correct side.
val left = df1.select('key, window('leftTime, "10 second"), 'leftValue)
val right = df2.select('key, window('rightTime, "10 second"), 'rightValue.cast("string"))
val joined = left.join(
right,
left("key") === right("key")
&& left("window") === right("window")
&& 'leftValue > 4,
"right_outer")
.select(right("key"), right("window.end").cast("long"), 'leftValue, 'rightValue)
testStream(joined)(
MultiAddData(leftInput, 1, 2, 3)(rightInput, 3, 4, 5),
// The left rows with leftValue <= 4 should never be added to the state.
CheckNewAnswer(Row(3, 10, 6, "9")), // leftValue = 7 > 4 hence joined and added to state
assertNumStateRows(total = 4, updated = 4),
// When the watermark advances, we get the outer join rows just as we would if they
// were added but didn't match the full join condition.
MultiAddData(leftInput, 20)(rightInput, 21), // watermark = 10, no-data-batch computes nulls
CheckNewAnswer(Row(4, 10, null, "12"), Row(5, 10, null, "15")),
AddData(rightInput, 20),
CheckNewAnswer(Row(20, 30, 40, "60"))
)
}
test("right outer early state exclusion on right") {
val (leftInput, df1) = setupStream("left", 2)
val (rightInput, df2) = setupStream("right", 3)
// Use different schemas to ensure the null row is being generated from the correct side.
val left = df1.select('key, window('leftTime, "10 second"), 'leftValue)
val right = df2.select('key, window('rightTime, "10 second"), 'rightValue.cast("string"))
val joined = left.join(
right,
left("key") === right("key")
&& left("window") === right("window")
&& 'rightValue.cast("int") > 7,
"right_outer")
.select(right("key"), right("window.end").cast("long"), 'leftValue, 'rightValue)
testStream(joined)(
MultiAddData(leftInput, 3, 4, 5)(rightInput, 1, 2, 3),
// The right rows with rightValue <= 7 should generate their outer join row now and
// not get added to the state.
CheckNewAnswer(Row(3, 10, 6, "9"), Row(1, 10, null, "3"), Row(2, 10, null, "6")),
assertNumStateRows(total = 4, updated = 4),
// We shouldn't get more outer join rows when the watermark advances.
MultiAddData(leftInput, 20)(rightInput, 21),
CheckNewAnswer(),
AddData(rightInput, 20),
CheckNewAnswer((20, 30, 40, "60"))
)
}
test("windowed left outer join") {
val (leftInput, rightInput, joined) = setupWindowedJoin("left_outer")
testStream(joined)(
// Test inner part of the join.
MultiAddData(leftInput, 1, 2, 3, 4, 5)(rightInput, 3, 4, 5, 6, 7),
CheckNewAnswer((3, 10, 6, 9), (4, 10, 8, 12), (5, 10, 10, 15)),
MultiAddData(leftInput, 21)(rightInput, 22), // watermark = 11, no-data-batch computes nulls
CheckNewAnswer(Row(1, 10, 2, null), Row(2, 10, 4, null)),
assertNumStateRows(total = 2, updated = 12),
AddData(leftInput, 22),
CheckNewAnswer(Row(22, 30, 44, 66)),
assertNumStateRows(total = 3, updated = 1)
)
}
test("windowed right outer join") {
val (leftInput, rightInput, joined) = setupWindowedJoin("right_outer")
testStream(joined)(
// Test inner part of the join.
MultiAddData(leftInput, 1, 2, 3, 4, 5)(rightInput, 3, 4, 5, 6, 7),
CheckNewAnswer((3, 10, 6, 9), (4, 10, 8, 12), (5, 10, 10, 15)),
MultiAddData(leftInput, 21)(rightInput, 22), // watermark = 11, no-data-batch computes nulls
CheckNewAnswer(Row(6, 10, null, 18), Row(7, 10, null, 21)),
assertNumStateRows(total = 2, updated = 12),
AddData(leftInput, 22),
CheckNewAnswer(Row(22, 30, 44, 66)),
assertNumStateRows(total = 3, updated = 1)
)
}
Seq(
("left_outer", Row(3, null, 5, null)),
("right_outer", Row(null, 2, null, 5))
).foreach { case (joinType: String, outerResult) =>
test(s"${joinType.replaceAllLiterally("_", " ")} with watermark range condition") {
import org.apache.spark.sql.functions._
val leftInput = MemoryStream[(Int, Int)]
val rightInput = MemoryStream[(Int, Int)]
val df1 = leftInput.toDF.toDF("leftKey", "time")
.select('leftKey, 'time.cast("timestamp") as "leftTime", ('leftKey * 2) as "leftValue")
.withWatermark("leftTime", "10 seconds")
val df2 = rightInput.toDF.toDF("rightKey", "time")
.select('rightKey, 'time.cast("timestamp") as "rightTime", ('rightKey * 3) as "rightValue")
.withWatermark("rightTime", "10 seconds")
val joined =
df1.join(
df2,
expr("leftKey = rightKey AND " +
"leftTime BETWEEN rightTime - interval 5 seconds AND rightTime + interval 5 seconds"),
joinType)
.select('leftKey, 'rightKey, 'leftTime.cast("int"), 'rightTime.cast("int"))
testStream(joined)(
AddData(leftInput, (1, 5), (3, 5)),
CheckAnswer(),
AddData(rightInput, (1, 10), (2, 5)),
CheckNewAnswer((1, 1, 5, 10)),
AddData(rightInput, (1, 11)),
CheckNewAnswer(), // no match as left time is too low
assertNumStateRows(total = 5, updated = 5),
// Increase event time watermark to 20s by adding data with time = 30s on both inputs
AddData(leftInput, (1, 7), (1, 30)),
CheckNewAnswer((1, 1, 7, 10), (1, 1, 7, 11)),
assertNumStateRows(total = 7, updated = 2),
AddData(rightInput, (0, 30)), // watermark = 30 - 10 = 20, no-data-batch computes nulls
CheckNewAnswer(outerResult),
assertNumStateRows(total = 2, updated = 1)
)
}
}
// When the join condition isn't true, the outer null rows must be generated, even if the join
// keys themselves have a match.
test("left outer join with non-key condition violated") {
val (leftInput, simpleLeftDf) = setupStream("left", 2)
val (rightInput, simpleRightDf) = setupStream("right", 3)
val left = simpleLeftDf.select('key, window('leftTime, "10 second"), 'leftValue)
val right = simpleRightDf.select('key, window('rightTime, "10 second"), 'rightValue)
val joined = left.join(
right,
left("key") === right("key") && left("window") === right("window") &&
'leftValue > 10 && ('rightValue < 300 || 'rightValue > 1000),
"left_outer")
.select(left("key"), left("window.end").cast("long"), 'leftValue, 'rightValue)
testStream(joined)(
// leftValue <= 10 should generate outer join rows even though it matches right keys
MultiAddData(leftInput, 1, 2, 3)(rightInput, 1, 2, 3),
CheckNewAnswer(Row(1, 10, 2, null), Row(2, 10, 4, null), Row(3, 10, 6, null)),
assertNumStateRows(total = 3, updated = 3), // only right 1, 2, 3 added
MultiAddData(leftInput, 20)(rightInput, 21), // watermark = 10, no-data-batch cleared < 10
CheckNewAnswer(),
assertNumStateRows(total = 2, updated = 2), // only 20 and 21 left in state
AddData(rightInput, 20),
CheckNewAnswer(Row(20, 30, 40, 60)),
assertNumStateRows(total = 3, updated = 1),
// leftValue and rightValue both satisfying condition should not generate outer join rows
MultiAddData(leftInput, 40, 41)(rightInput, 40, 41), // watermark = 31
CheckNewAnswer((40, 50, 80, 120), (41, 50, 82, 123)),
assertNumStateRows(total = 4, updated = 4), // only left 40, 41 + right 40,41 left in state
MultiAddData(leftInput, 70)(rightInput, 71), // watermark = 60
CheckNewAnswer(),
assertNumStateRows(total = 2, updated = 2), // only 70, 71 left in state
AddData(rightInput, 70),
CheckNewAnswer((70, 80, 140, 210)),
assertNumStateRows(total = 3, updated = 1),
// rightValue between 300 and 1000 should generate outer join rows even though it matches left
MultiAddData(leftInput, 101, 102, 103)(rightInput, 101, 102, 103), // watermark = 91
CheckNewAnswer(),
assertNumStateRows(total = 6, updated = 3), // only 101 - 103 left in state
MultiAddData(leftInput, 1000)(rightInput, 1001),
CheckNewAnswer(
Row(101, 110, 202, null),
Row(102, 110, 204, null),
Row(103, 110, 206, null)),
assertNumStateRows(total = 2, updated = 2)
)
}
}
|
aosagie/spark
|
sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingJoinSuite.scala
|
Scala
|
apache-2.0
| 30,132 |
/**
* Copyright 2013 Gianluca Amato <[email protected]>
*
* This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
* JANDOM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* JANDOM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with JANDOM. If not, see <http://www.gnu.org/licenses/>.
*/
package it.unich.jandom.ui
import it.unich.jandom.domains.objects.ObjectDomainFactory
import it.unich.jandom.domains.objects.PairSharingDomain
import it.unich.jandom.domains.objects.AliasingDomain
/**
* A parameter enumeration for the object domains which are supported in Jandom.
* @author Gianluca Amato <[email protected]>
*/
object ObjectDomains extends ParameterEnumeration[ObjectDomainFactory] {
val name = "Object Domain"
val description = "The object domain to use for the analysis"
val values: Seq[ParameterValue[ObjectDomainFactory]] = Seq(
ParameterValue(PairSharingDomain,"Pair Sharing","The pair sharing domain by Spoto and Secci"),
ParameterValue(AliasingDomain,"Aliasing","A domain for aliasing")
)
val default = values.last
}
|
rubino22/JDBeta
|
core/src/main/scala/it/unich/jandom/ui/ObjectDomains.scala
|
Scala
|
lgpl-3.0
| 1,527 |
/**
* Licensed to Gravity.com under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Gravity.com licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.gravity.goose.text
/**
* Created by Jim Plush
* User: jim
* Date: 8/16/11
*/
import com.gravity.goose.utils.FileHelper
import com.gravity.goose.Language._
import com.chenlb.mmseg4j.ComplexSeg
import com.chenlb.mmseg4j.Dictionary
import com.chenlb.mmseg4j.MMSeg
import com.chenlb.mmseg4j.Seg
import com.chenlb.mmseg4j.Word
import java.io.StringReader
import scala.collection.JavaConversions._
import java.util.HashMap
import scala.collection.Set
import java.util.Map
import com.gravity.goose.Language
object StopWords {
// the confusing pattern below basically just matches any non-word character, excluding white-space.
private val PUNCTUATION: StringReplacement = StringReplacement.compile("[^\\p{Ll}\\p{Lu}\\p{Lt}\\p{Lo}\\p{Nd}\\p{Pc}\\s]", string.empty)
//raisercostin: use the other method of memoising the languages on first access
// TODO: there must a better way to do this. See
// http://www.uofr.net/~greg/java/get-resource-listing.html?
// val LANGUAGES: Set[String] = Set("ar", "da", "de", "en", "es", "fi", "fr",
// "hu", "id", "it", "ko", "nb", "nl", "no",
// "pl", "pt", "ru", "sv", "zh")
//
// val stopWordsMap: Map[String, Set[String]] =
// (LANGUAGES.view map {lang =>
// lang ->
// FileHelper.loadResourceFile("stopwords-" + lang + ".txt",
// StopWords.getClass).split(sys.props("line.separator")).toSet
// }).toMap.withDefaultValue(Set())
//val STOP_WORDS = FileHelper.loadResourceFile("stopwords-en.txt", StopWords.getClass).split(sys.props("line.separator")).toSet
private var stopWordsMap: Map[String, Set[String]] = new HashMap[String, Set[String]]()
def removePunctuation(str: String): String = {
PUNCTUATION.replaceAll(str)
}
def getStopWords(language: Language): Set[String] = getStopWords(language.toString)
def getStopWords(lname: String): Set[String] = {
var stopWords = stopWordsMap.get(lname)
if (stopWords == null) {
var stopWordsFile = "stopwords-%s.txt" format lname
stopWords = FileHelper.loadResourceFile(stopWordsFile, StopWords.getClass).split(sys.props("line.separator")).toSet
stopWords = stopWords.map(s=>s.trim)
stopWordsMap.put(lname, stopWords)
}
stopWords
}
def getCandidateWords(strippedInput: String, language: String): Array[String] = getCandidateWords(strippedInput,
Language(language))
def getCandidateWords(strippedInput: String, language: Language): Array[String] = {
language match {
case English => string.SPACE_SPLITTER.split(strippedInput)
case Chinese => tokenize(strippedInput).toArray
case _ => string.SPACE_SPLITTER.split(strippedInput)
}
}
def getStopWordCount(content: String, lang: String = "en"): WordStats = {
// def getStopWordCount(content: String, language: Language): WordStats = {
if (string.isNullOrEmpty(content)) return WordStats.EMPTY
val ws: WordStats = new WordStats
val strippedInput: String = removePunctuation(content)
//val candidateWords = getCandidateWords(strippedInput, language)
val candidateWords = getCandidateWords(strippedInput, lang)
var overlappingStopWords: List[String] = List[String]()
// val stopWords = getStopWords(language)
val stopWords = getStopWords(lang)
if (stopWords.size > 0) {
//scala-ify? overlappingStopWords = candidateWords.filter(w=>stopWords.contains(w.toLowerCase)).map(w=>w.toLowerCase)
candidateWords.foreach(w => {
if (stopWords.contains(w.toLowerCase)) {
overlappingStopWords = w.toLowerCase :: overlappingStopWords
}
})
}
ws.setWordCount(candidateWords.length)
ws.setStopWordCount(overlappingStopWords.size)
ws.setStopWords(overlappingStopWords)
ws
}
def tokenize(line: String): List[String] = {
var seg = new ComplexSeg(Dictionary.getInstance());
var mmSeg = new MMSeg(new StringReader(line), seg);
var tokens = List[String]();
var word = mmSeg.next()
while (word != null) {
tokens = word.getString() :: tokens ;
word = mmSeg.next();
}
return tokens;
}
}
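// Minimal usage sketch (illustrative only):
//   StopWords.removePunctuation("Hello, world!")                      // -> "Hello world"
//   val stats = StopWords.getStopWordCount("this is a test sentence") // language defaults to "en"
//   // `stats` is a WordStats carrying the total word count, the stop-word count and the matched stop words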
|
raisercostin/goose
|
src/main/scala/com/gravity/goose/text/StopWords.scala
|
Scala
|
apache-2.0
| 4,930 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import scala.annotation.implicitNotFound
import scala.reflect.ClassTag
import org.apache.spark.annotation.{Experimental, InterfaceStability}
import org.apache.spark.sql.types._
/**
* :: Experimental ::
* Used to convert a JVM object of type `T` to and from the internal Spark SQL representation.
*
* == Scala ==
* Encoders are generally created automatically through implicits from a `SparkSession`, or can be
* explicitly created by calling static methods on [[Encoders]].
*
* {{{
* import spark.implicits._
*
* val ds = Seq(1, 2, 3).toDS() // implicitly provided (spark.implicits.newIntEncoder)
* }}}
*
* == Java ==
* Encoders are specified by calling static methods on [[Encoders]].
*
* {{{
* List<String> data = Arrays.asList("abc", "abc", "xyz");
* Dataset<String> ds = context.createDataset(data, Encoders.STRING());
* }}}
*
* Encoders can be composed into tuples:
*
* {{{
* Encoder<Tuple2<Integer, String>> encoder2 = Encoders.tuple(Encoders.INT(), Encoders.STRING());
* List<Tuple2<Integer, String>> data2 = Arrays.asList(new scala.Tuple2(1, "a"));
* Dataset<Tuple2<Integer, String>> ds2 = context.createDataset(data2, encoder2);
* }}}
*
* Or constructed from Java Beans:
*
* {{{
* Encoders.bean(MyClass.class);
* }}}
*
* == Implementation ==
* - Encoders are not required to be thread-safe and thus they do not need to use locks to guard
* against concurrent access if they reuse internal buffers to improve performance.
*
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
@implicitNotFound("Unable to find encoder for type ${T}. An implicit Encoder[${T}] is needed to " +
"store ${T} instances in a Dataset. Primitive types (Int, String, etc) and Product types (case " +
"classes) are supported by importing spark.implicits._ Support for serializing other types " +
"will be added in future releases.")
trait Encoder[T] extends Serializable {
/** Returns the schema of encoding this type of object as a Row. */
def schema: StructType
/**
* A ClassTag that can be used to construct an Array to contain a collection of `T`.
*/
def clsTag: ClassTag[T]
}
|
tejasapatil/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/Encoder.scala
|
Scala
|
apache-2.0
| 2,990 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.convert.text
import java.io.{InputStreamReader, ByteArrayInputStream}
import java.nio.charset.StandardCharsets
import com.google.common.io.Resources
import com.typesafe.config.ConfigFactory
import com.vividsolutions.jts.geom.{Coordinate, GeometryFactory}
import org.apache.commons.csv.CSVFormat
import org.junit.runner.RunWith
import org.locationtech.geomesa.convert.SimpleFeatureConverters
import org.locationtech.geomesa.convert.Transformers.DefaultCounter
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class DelimitedTextConverterTest extends Specification {
sequential
"DelimitedTextConverter" should {
val data =
"""
|1,hello,45.0,45.0
|2,world,90.0,90.0
|willfail,hello
""".stripMargin
val conf = ConfigFactory.parseString(
"""
| {
| type = "delimited-text",
| format = "DEFAULT",
| id-field = "md5(string2bytes($0))",
| fields = [
| { name = "oneup", transform = "$1" },
| { name = "phrase", transform = "concat($1, $2)" },
| { name = "lat", transform = "$3::double" },
| { name = "lon", transform = "$4::double" },
| { name = "lit", transform = "'hello'" },
| { name = "geom", transform = "point($lat, $lon)" }
| { name = "l1", transform = "concat($lit, $lit)" }
| { name = "l2", transform = "concat($l1, $lit)" }
| { name = "l3", transform = "concat($l2, $lit)" }
| ]
| }
""".stripMargin)
val sft = SimpleFeatureTypes.createType(ConfigFactory.load("sft_testsft.conf"))
"be built from a conf" >> {
val converter = SimpleFeatureConverters.build[String](sft, conf)
converter must not beNull
val res = converter.processInput(data.split("\n").toIterator.filterNot( s => "^\\s*$".r.findFirstIn(s).size > 0)).toList
converter.close()
"and process some data" >> {
res.size must be equalTo 2
res(0).getAttribute("phrase").asInstanceOf[String] must be equalTo "1hello"
res(1).getAttribute("phrase").asInstanceOf[String] must be equalTo "2world"
}
"handle more derived fields than input fields" >> {
res(0).getAttribute("oneup").asInstanceOf[String] must be equalTo "1"
}
}
"handle tab delimited files" >> {
val conf = ConfigFactory.parseString(
"""
| {
| type = "delimited-text",
| format = "TDF",
| id-field = "md5(string2bytes($0))",
| fields = [
| { name = "phrase", transform = "concat($1, $2)" },
| { name = "lat", transform = "$3::double" },
| { name = "lon", transform = "$4::double" },
| { name = "geom", transform = "point($lat, $lon)" }
| ]
| }
""".stripMargin)
val sft = SimpleFeatureTypes.createType(ConfigFactory.load("sft_testsft.conf"))
val converter = SimpleFeatureConverters.build[String](sft, conf)
converter must not beNull
val res = converter.processInput(data.split("\n").toIterator.filterNot( s => "^\\s*$".r.findFirstIn(s).size > 0).map(_.replaceAll(",", "\t"))).toList
converter.close()
res.size must be equalTo 2
res(0).getAttribute("phrase").asInstanceOf[String] must be equalTo "1hello"
res(1).getAttribute("phrase").asInstanceOf[String] must be equalTo "2world"
}
"handle line number transform and filename global parameter correctly " >> {
val conf = ConfigFactory.parseString(
"""
| {
| type = "delimited-text",
| format = "TDF",
| id-field = "md5(string2bytes($0))",
| fields = [
| { name = "phrase", transform = "concat($1, $2)" },
| { name = "lineNr", transform = "lineNo()"},
| { name = "fn", transform = "$filename"},
| { name = "lat", transform = "$3::double" },
| { name = "lon", transform = "$4::double" },
| { name = "geom", transform = "point($lat, $lon)" }
| ]
| }
""".stripMargin)
val sft = SimpleFeatureTypes.createType(ConfigFactory.load("sft_testsft.conf"))
val converter = SimpleFeatureConverters.build[String](sft, conf)
converter must not beNull
val input = data.split("\n").toIterator.filterNot( s => "^\\s*$".r.findFirstIn(s).size > 0).map(_.replaceAll(",", "\t"))
val ec = converter.createEvaluationContext(Map("filename"-> "/some/file/path/testfile.txt"))
val res = converter.processInput(input, ec).toList
converter.close()
res.size must be equalTo 2
res(0).getAttribute("phrase").asInstanceOf[String] must be equalTo "1hello"
res(0).getAttribute("lineNr").asInstanceOf[Long] must be equalTo 1
res(0).getAttribute("fn").asInstanceOf[String] must be equalTo "/some/file/path/testfile.txt"
res(1).getAttribute("phrase").asInstanceOf[String] must be equalTo "2world"
res(1).getAttribute("lineNr").asInstanceOf[Long] must be equalTo 2
res(1).getAttribute("fn").asInstanceOf[String] must be equalTo "/some/file/path/testfile.txt"
}
"handle line number transform and filename global in id-field " >> {
val conf = ConfigFactory.parseString(
"""
| {
| type = "delimited-text",
| format = "TDF",
| id-field = "concat($filename, lineNo())",
| fields = [
| { name = "phrase", transform = "concat($1, $2)" },
| { name = "lineNr", transform = "lineNo()"},
| { name = "fn", transform = "$filename"},
| { name = "lat", transform = "$3::double" },
| { name = "lon", transform = "$4::double" },
| { name = "geom", transform = "point($lat, $lon)" }
| ]
| }
""".stripMargin)
val sft = SimpleFeatureTypes.createType(ConfigFactory.load("sft_testsft.conf"))
val converter = SimpleFeatureConverters.build[String](sft, conf)
converter must not beNull
val input = data.split("\n").toIterator.filterNot( s => "^\\s*$".r.findFirstIn(s).size > 0).map(_.replaceAll(",", "\t"))
val ec = converter.createEvaluationContext(Map("filename"-> "/some/file/path/testfile.txt"))
val res = converter.processInput(input, ec).toList
converter.close()
res.size must be equalTo 2
res(0).getAttribute("phrase").asInstanceOf[String] must be equalTo "1hello"
res(0).getAttribute("lineNr").asInstanceOf[Long] must be equalTo 1
res(0).getAttribute("fn").asInstanceOf[String] must be equalTo "/some/file/path/testfile.txt"
res(1).getAttribute("phrase").asInstanceOf[String] must be equalTo "2world"
res(1).getAttribute("lineNr").asInstanceOf[Long] must be equalTo 2
res(1).getAttribute("fn").asInstanceOf[String] must be equalTo "/some/file/path/testfile.txt"
}
"handle projecting to just the attributes in the SFT (and associated input dependencies)" >> {
// l3 has cascading dependencies
val subsft = SimpleFeatureTypes.createType("subsettest", "l3:String,geom:Point:srid=4326")
val conv = SimpleFeatureConverters.build[String](subsft, conf)
      val res = conv.processInput(data.split("\n").toIterator.filterNot( s => "^\\s*$".r.findFirstIn(s).size > 0)).toList
conv.close()
res.length must be equalTo 2
}
"handle horrible quoting and nested separators" >> {
val conf = ConfigFactory.parseString(
"""
| {
| type = "delimited-text",
| format = "EXCEL",
| id-field = "md5(string2bytes($0))",
| fields = [
| { name = "phrase", transform = "concat($1, $2)" },
| { name = "lat", transform = "$3::double" },
| { name = "lon", transform = "$4::double" },
| { name = "geom", transform = "point($lat, $lon)" }
| ]
| }
""".stripMargin)
import scala.collection.JavaConversions._
val data = Resources.readLines(Resources.getResource("messydata.csv"), StandardCharsets.UTF_8)
val sft = SimpleFeatureTypes.createType(ConfigFactory.load("sft_testsft.conf"))
val converter = SimpleFeatureConverters.build[String](sft, conf)
converter must not beNull
val res = converter.processInput(data.iterator()).toList
converter.close()
res.size must be equalTo 2
      res(0).getAttribute("phrase").asInstanceOf[String] must be equalTo "1hello, \"foo\""
res(1).getAttribute("phrase").asInstanceOf[String] must be equalTo "2world"
}
"handle records bigger than buffer size" >> {
// set the buffer size to 16 bytes and try to write records that are bigger than the buffer size
val sizeConf = ConfigFactory.parseString(
"""
| {
| type = "delimited-text",
| format = "DEFAULT",
| id-field = "md5(string2bytes($0))",
| options = {
| pipe-size = 16 // 16 bytes
| },
| fields = [
| { name = "oneup", transform = "$1" },
| { name = "phrase", transform = "concat($1, $2)" },
| { name = "lat", transform = "$3::double" },
| { name = "lon", transform = "$4::double" },
| { name = "lit", transform = "'hello'" },
| { name = "geom", transform = "point($lat, $lon)" }
| ]
| }
""".stripMargin)
val converter = SimpleFeatureConverters.build[String](sft, sizeConf)
converter.asInstanceOf[DelimitedTextConverter]
val data =
"""
|1,hello,45.0,45.0
|2,world,90.0,90.0
|willfail,hello
""".stripMargin
      val nonEmptyData = data.split("\n").toIterator.filterNot(s => "^\\s*$".r.findFirstIn(s).size > 0)
val res = converter.processInput(nonEmptyData).toList
converter.close()
res.size must be greaterThan 0
}
"handle wkt" >> {
val wktData =
"""
|1,hello,Point(46.0 45.0)
|2,world,Point(90.0 90.0)
""".stripMargin
val wktConf = ConfigFactory.parseString(
"""
| {
| type = "delimited-text",
| format = "DEFAULT",
| id-field = "md5(string2bytes($0))",
| fields = [
| { name = "oneup", transform = "$1" },
| { name = "phrase", transform = "concat($1, $2)" },
| { name = "geom", transform = "geometry($3)" }
| ]
| }
""".stripMargin)
val wktSft = SimpleFeatureTypes.createType(ConfigFactory.load("sft_testsft.conf"))
val converter = SimpleFeatureConverters.build[String](wktSft, wktConf)
      val res = converter.processInput(wktData.split("\n").toIterator.filterNot( s => "^\\s*$".r.findFirstIn(s).size > 0)).toList
res.length mustEqual 2
converter.close()
val geoFac = new GeometryFactory()
res(0).getDefaultGeometry mustEqual geoFac.createPoint(new Coordinate(46, 45))
res(1).getDefaultGeometry mustEqual geoFac.createPoint(new Coordinate(90, 90))
}
"skip header lines" >> {
val conf =
"""
| {
| type = "delimited-text",
| format = "DEFAULT",
| id-field = "md5(string2bytes($0))",
| options = {
| skip-lines = SKIP
| },
| fields = [
| { name = "oneup", transform = "$1" },
| { name = "phrase", transform = "concat($1, $2)" },
| { name = "geom", transform = "geometry($3)" }
| ]
| }
""".stripMargin
val wktSft = SimpleFeatureTypes.createType(ConfigFactory.load("sft_testsft.conf"))
"csv parser failblog or misunderstanding test" >> {
val format = CSVFormat.DEFAULT.withSkipHeaderRecord(true).withIgnoreEmptyLines(true)
val trueData =
"""
|num,msg,geom
|1,hello,Point(46.0 45.0)
|2,world,Point(90.0 90.0)
""".stripMargin
import scala.collection.JavaConversions._
val sz = format.parse(new InputStreamReader(new ByteArrayInputStream(trueData.getBytes))).iterator().toList.size
        // prove that skipHeaderRecord and ignoreEmptyLines don't work as expected (at least as configured here),
        // so we are safe to consume the header record and empty lines as part of our config
sz mustEqual 4
}
"with single line header" >> {
val trueConf = ConfigFactory.parseString(conf.replaceAllLiterally("SKIP", "1"))
val trueData =
"""
|num,msg,geom
|1,hello,Point(46.0 45.0)
|2,world,Point(90.0 90.0)
""".stripMargin
val converter = SimpleFeatureConverters.build[String](wktSft, trueConf)
val counter = new DefaultCounter
val ec = converter.createEvaluationContext(counter = counter)
        val res = converter.processInput(trueData.split("\n").toIterator.filterNot( s => "^\\s*$".r.findFirstIn(s).size > 0), ec).toList
res.length mustEqual 2
converter.close()
counter.getLineCount mustEqual 3
counter.getSuccess mustEqual 2
counter.getFailure mustEqual 0
val geoFac = new GeometryFactory()
res(0).getDefaultGeometry mustEqual geoFac.createPoint(new Coordinate(46, 45))
res(1).getDefaultGeometry mustEqual geoFac.createPoint(new Coordinate(90, 90))
}
"with header set to 0" >> {
val falseConf = ConfigFactory.parseString(conf.replaceAllLiterally("SKIP", "0"))
val falseData =
"""
|1,hello,Point(46.0 45.0)
|2,world,Point(90.0 90.0)
""".stripMargin
val converter = SimpleFeatureConverters.build[String](wktSft, falseConf)
val counter = new DefaultCounter
val ec = converter.createEvaluationContext(counter = counter)
        val res = converter.processInput(falseData.split("\n").toIterator.filterNot( s => "^\\s*$".r.findFirstIn(s).size > 0), ec).toList
res.length mustEqual 2
converter.close()
counter.getLineCount mustEqual 2
counter.getSuccess mustEqual 2
counter.getFailure mustEqual 0
val geoFac = new GeometryFactory()
res(0).getDefaultGeometry mustEqual geoFac.createPoint(new Coordinate(46, 45))
res(1).getDefaultGeometry mustEqual geoFac.createPoint(new Coordinate(90, 90))
}
"with header set to 3" >> {
val falseConf = ConfigFactory.parseString(conf.replaceAllLiterally("SKIP", "3"))
val falseData =
"""
|num,msg,geom
|some other garbage
|that somebody placed in my file maybe as a comment
|1,hello,Point(46.0 45.0)
|2,world,Point(90.0 90.0)
""".stripMargin
val converter = SimpleFeatureConverters.build[String](wktSft, falseConf)
val counter = new DefaultCounter
val ec = converter.createEvaluationContext(counter = counter)
        val res = converter.processInput(falseData.split("\n").toIterator.filterNot( s => "^\\s*$".r.findFirstIn(s).size > 0), ec).toList
res.length mustEqual 2
converter.close()
counter.getLineCount mustEqual 5
counter.getSuccess mustEqual 2
counter.getFailure mustEqual 0
val geoFac = new GeometryFactory()
res(0).getDefaultGeometry mustEqual geoFac.createPoint(new Coordinate(46, 45))
res(1).getDefaultGeometry mustEqual geoFac.createPoint(new Coordinate(90, 90))
}
}
}
}
|
mdzimmerman/geomesa
|
geomesa-convert/geomesa-convert-text/src/test/scala/org/locationtech/geomesa/convert/text/DelimitedTextConverterTest.scala
|
Scala
|
apache-2.0
| 16,615 |
/*
* Copyright 2016 University of Basel, Graphics and Vision Research Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalismo.faces.image
import scala.collection.parallel.immutable.ParVector
import scala.reflect.ClassTag
/** Image buffer for mutable construction of an image; call toImage when finished to convert it to a standard immutable image. */
class ImageBuffer[@specialized(Int, Float, Double) Pixel: ClassTag] (val domain: PixelImageDomain, private var data: Array[Pixel]) {
def width: Int = domain.width
def height: Int = domain.height
require(data.length == domain.length)
private var safeToWrite: Boolean = true
  /// execute f only if it is safe to do so; otherwise clone the data first
private def copyOnWrite(f: => Unit) = {
if (safeToWrite)
f
else {
data = data.clone()
safeToWrite = true
f
}
}
/// read value at Index
def apply(x: Int, y: Int): Pixel = data(domain.index(x, y))
private def rawUpdate(x: Int, y: Int, value: Pixel): Unit = {
data(domain.index(x, y)) = value
}
/// Set pixel value for index
def update(x: Int, y: Int, value: Pixel): Unit = copyOnWrite(rawUpdate(x,y,value))
private def rawTransform(f: Pixel => Pixel): Unit =(0 until domain.length).foreach(i => data(i) = f(data(i)))
/// in-place transformation
def transform(f: Pixel => Pixel): Unit = copyOnWrite(rawTransform(f))
private def rawTransformWithIndex(f: (Int, Int, Pixel) => Pixel): Unit = (0 until domain.length).foreach(i => data(i) = f(domain.x(i), domain.y(i), data(i)))
/// in-place transformation
def transformWithIndex(f: (Int, Int, Pixel) => Pixel): Unit = copyOnWrite(rawTransformWithIndex(f))
private def rawTransformWithIndexParallel(f: (Int, Int, Pixel) => Pixel): Unit = ParVector.range(0, domain.length).foreach(i => data(i) = f(domain.x(i), domain.y(i), data(i)))
/// in-place transformation, parallel execution
def transformWithIndexParallel(f: (Int, Int, Pixel) => Pixel): Unit = copyOnWrite(rawTransformWithIndexParallel(f))
def copyData: Array[Pixel] = data.clone()
private def rawFill(f: => Pixel): Unit = (0 until domain.length).foreach(i => data(i) = f)
/// fill buffer with value
def fill(f: => Pixel): Unit = copyOnWrite(rawFill(f))
/** make a read-only copy, copy is cheap, implemented as copy-on-write on next mutable operation */
def toImage: PixelImage[Pixel] = {
safeToWrite = false // mark data as unsafe to be mutated
PixelImage(domain, data)
}
  /** *unsafe* make a read-only image that is an unsafe view of this buffer; it changes with every subsequent mutable operation! */
def toUnsafeImage: PixelImage[Pixel] = {
PixelImage(domain, data)
}
}
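/**
 * Usage sketch (illustrative only): the create-mutate-freeze cycle and the copy-on-write
 * behaviour of `toImage`, using only the methods of the class above and the factories in
 * the companion object below.
 * {{{
 * // build a mutable buffer by tabulating a function over pixel coordinates
 * val buffer = ImageBuffer.tabulate[Double](640, 480)((x, y) => (x + y).toDouble)
 * // in-place mutation is cheap while the buffer is not shared
 * buffer.transform(v => v * 0.5)
 * // freeze into an immutable image; no data is copied at this point
 * val image: PixelImage[Double] = buffer.toImage
 * // the next mutation triggers copy-on-write, so `image` stays unchanged
 * buffer.update(0, 0, 1.0)
 * }}}
 */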
object ImageBuffer {
def apply[@specialized(Int, Float, Double) Pixel: ClassTag](width: Int, height: Int, data: Array[Pixel]): ImageBuffer[Pixel] = {
new ImageBuffer[Pixel](PixelImageDomain(width, height), data)
}
def apply[@specialized(Int, Float, Double) Pixel: ClassTag](domain: PixelImageDomain, data: Array[Pixel]): ImageBuffer[Pixel] = {
new ImageBuffer[Pixel](domain, data)
}
def apply[Pixel: ClassTag](image: PixelImage[Pixel]): ImageBuffer[Pixel] = image.toBuffer
/// Create an uninitialized image buffer, 2D image
def makeEmptyBuffer[Pixel: ClassTag](width: Int, height: Int): ImageBuffer[Pixel] = {
// create storage: Array
val data = new Array[Pixel](width * height)
ImageBuffer(width, height, data)
}
/// Create initialized image buffer, 2D image
def makeInitializedBuffer[Pixel: ClassTag](width: Int, height: Int)(f: => Pixel): ImageBuffer[Pixel] = {
// create storage: Array
val data = Array.fill(width * height)(f)
ImageBuffer(width, height, data)
}
/// Create initialized image buffer, 2D image
def makeConstantBuffer[Pixel: ClassTag](width: Int, height: Int, value: Pixel): ImageBuffer[Pixel] = {
// create storage: Array
val data = new Array[Pixel](width * height)
var i = 0
while (i < data.length) {
data(i) = value
i += 1
}
ImageBuffer(width, height, data)
}
/// initialize buffer by tabulating a function
def tabulate[Pixel: ClassTag](width: Int, height: Int)(f: (Int, Int) => Pixel): ImageBuffer[Pixel] = {
// create storage: uninitialized Array
val data = new Array[Pixel](width * height)
val buffer = ImageBuffer(width, height, data)
buffer.transformWithIndexParallel((x: Int, y: Int, _: Pixel) => f(x, y))
buffer
}
}
|
unibas-gravis/scalismo-faces
|
src/main/scala/scalismo/faces/image/ImageBuffer.scala
|
Scala
|
apache-2.0
| 4,988 |
package net.magik6k.jliblxc.natives
import java.io.FileDescriptor
import net.magik6k.jliblxc.{Snapshot, BdevSpecs}
private[jliblxc] class NativeLxcContainerStatic {
@native def list(lxcpath: String): Array[String]
@native def listActive(lxcpath: String): Array[String]
@native def open(name: String, configPath: String): Long
}
private[jliblxc] object NativeLxcContainer extends NativeLxcContainerStatic
private[jliblxc] class NativeLxcContainer(private var containerPtr: Long) {
def this(name: String, configPath: String) {this(NativeLxcContainer.open(name, configPath))}
//def this(containerPtr: Long) {this(); this.containerPtr = containerPtr}
if(containerPtr == 0)
throw new NullPointerException("Cannot open container")
////////////
// EXPORT //
////////////
// Static
def free() = {
if(containerPtr != 0) _free(containerPtr)
containerPtr = 0
}
override def finalize() = free()
// Non-static
def getLastError = _getLastError(containerPtr)
def isDefined = _isDefined(containerPtr)
def state() = _state(containerPtr)
def isRunning = _isRunning(containerPtr)
def initPid() = _initPid(containerPtr)
def mayControl() = _mayControl(containerPtr)
def freeze() = _freeze(containerPtr)
def unfreeze() = _unfreeze(containerPtr)
def start(args: Array[String]) = _start(containerPtr, args)
def stop() = _stop(containerPtr)
def reboot() = _reboot(containerPtr)
def shutdown(timeout: Int) = _shutdown(containerPtr, timeout)
def loadConfig(altFile: String) = _loadConfig(containerPtr, altFile)
def saveConfig(altFile: String) = _saveConfig(containerPtr, altFile)
def getConfigPath = _getConfigPath(containerPtr)
def setConfigPath(path: String) = _setConfigPath(containerPtr, path)
def configFileName() = _configFileName(containerPtr)
def setConfigItem(key: String, value: String) = _setConfigItem(containerPtr, key, value)
def clearConfig() = _clearConfig(containerPtr)
def clearConfigItem(key: String) = _clearConfigItem(containerPtr, key)
def getConfigItem(key: String) = _getConfigItem(containerPtr, key)
def getRunningConfigItem(key: String) = _getRunningConfigItem(containerPtr, key)
def getKeys(key: String) = _getKeys(containerPtr, key)
def wantDaemonize(state: Boolean) = _wantDaemonize(containerPtr, state)
def wantCloseAllFDs(state: Boolean) = _wantCloseAllFDs(containerPtr, state)
def waitForState(state: String, timeout: Int) = _waitForState(containerPtr, state, timeout)
def create(template: String, bdType: String, bdSpecs: BdevSpecs, flags: Int, args: Array[String]) = _create(containerPtr, template, bdType, bdSpecs, flags, args)
def cloneContainer(newName: String, lxcPath: String, flags: Int, bDevType: String, bDevData: String, newSize: Long, hookArgs: Array[String])
= new NativeLxcContainer(_cloneContainer(containerPtr, newName, lxcPath, flags, bDevType, bDevData, newSize, hookArgs))
def rename(newName: String) = _rename(containerPtr, newName)
def destroy() = _destroy(containerPtr)
def destroyWithSnapshots() = _destroyWithSnapshots(containerPtr)
def snapshotDestroyAll() = _snapshotDestroyAll(containerPtr)
def checkpoint(directory: String, stop: Boolean, verbose: Boolean) = _checkpoint(containerPtr, directory, stop, verbose)
def restore(directory: String, verbose: Boolean) = _restore(containerPtr, directory, verbose)
def snapshot(commentFile: String) = _snapshot(containerPtr, commentFile)
def snapshotList() = _snapshotList(containerPtr)
def snapshotRestore(snapName: String, newName: String) = _snapshotRestore(containerPtr, snapName, newName)
def snapshotDestroy(snapName: String) = _snapshotDestroy(containerPtr, snapName)
def getInterfaces = _getInterfaces(containerPtr)
def getIps(interface: String, family: String, scope: Int) = _getIps(containerPtr, interface, family, scope)
def attachInterface(device: String, dstDevice: String) = _attachInterface(containerPtr, device, dstDevice)
def detachInterface(device: String, dstDevice: String) = _detachInterface(containerPtr, device, dstDevice)
def getCgroupItem(subSystem: String) = _getCgroupItem(containerPtr, subSystem)
def setCgroupItem(subSystem: String, value: String) = _setCgroupItem(containerPtr, subSystem, value)
def console() = _console(containerPtr)
def attach() = _attach(containerPtr)
def attachRunWait() = _attachRunWait(containerPtr)
def addDeviceNode(srcPath: String, dstPath: String) = _addDeviceNode(containerPtr, srcPath, dstPath)
def removeDeviceNode(srcPath: String, dstPath: String) = _removeDeviceNode(containerPtr, srcPath, dstPath)
/////////////
// NATIVES //
/////////////
// Static
// Native: LxcContainer.c
@native protected def _free(ptr: Long): Long
// Non-static
// Native: LxcContainerInfo.c
@native protected def _getLastError(ptr: Long): String
@native protected def _isDefined(ptr: Long): Boolean
@native protected def _state(ptr: Long): String
@native protected def _isRunning(ptr: Long): Boolean
@native protected def _initPid(ptr: Long): Int
@native protected def _mayControl(ptr: Long): Boolean
// Native: LxcContainerPower.c
@native protected def _freeze(ptr: Long): Boolean
@native protected def _unfreeze(ptr: Long): Boolean
@native protected def _start(ptr: Long, args: Array[String]): Boolean
@native protected def _stop(ptr: Long): Boolean
@native protected def _reboot(ptr: Long): Boolean
@native protected def _shutdown(ptr: Long, timeout: Int): Boolean
// Native: LxcContainerConfig.c
@native protected def _loadConfig(ptr: Long, altFile: String): Boolean
@native protected def _saveConfig(ptr: Long, altFile: String): Boolean
@native protected def _getConfigPath(ptr: Long): String
@native protected def _setConfigPath(ptr: Long, filePath: String): Boolean
@native protected def _configFileName(ptr: Long): String
@native protected def _setConfigItem(ptr: Long, key: String, value: String): Boolean
@native protected def _clearConfig(ptr: Long): Unit
@native protected def _clearConfigItem(ptr: Long, key: String): Boolean
@native protected def _getConfigItem(ptr: Long, key: String): String
@native protected def _getRunningConfigItem(ptr: Long, key: String): String
@native protected def _getKeys(ptr: Long, key: String): Array[String]
// Native: LxcContainerState.c
@native protected def _wantDaemonize(ptr: Long, state: Boolean): Boolean
@native protected def _wantCloseAllFDs(ptr: Long, state: Boolean): Boolean
@native protected def _waitForState(ptr: Long, state: String, timeout: Int): Boolean
// Native: LxcContainerManage.c
@native protected def _create(ptr: Long, template: String, bdType: String, bdSpecs: BdevSpecs, flags: Int, args: Array[String]): Boolean
@native protected def _cloneContainer(ptr: Long, newName: String, lxcPath: String, flags: Int, bDevType: String, bDevData: String, newSize: Long, hookArgs: Array[String]): Long
@native protected def _rename(ptr: Long, newName: String): Boolean
@native protected def _destroy(ptr: Long): Boolean
@native protected def _destroyWithSnapshots(ptr: Long): Boolean
@native protected def _snapshotDestroyAll(ptr: Long): Boolean
// Native: LxcContainerSnapshot.c
@native protected def _checkpoint(ptr: Long, directory: String, stop: Boolean, verbose: Boolean): Boolean
@native protected def _restore(ptr: Long, directory: String, verbose: Boolean): Boolean
@native protected def _snapshot(ptr: Long, commentFile: String): Int
@native protected def _snapshotList(ptr: Long): Array[Snapshot]
@native protected def _snapshotRestore(ptr: Long, snapName: String, newName: String): Boolean
@native protected def _snapshotDestroy(ptr: Long, snapName: String): Boolean
// Native: LxcContainerNetwork.c
@native protected def _getInterfaces(ptr: Long): Array[String]
@native protected def _getIps(ptr: Long, interface: String, family: String, scope: Int): Array[String]
@native protected def _attachInterface(ptr: Long, device: String, dstDevice: String): Boolean
@native protected def _detachInterface(ptr: Long, device: String, dstDevice: String): Boolean
// Native: LxcContainerCgroup.c
@native protected def _getCgroupItem(ptr: Long, subSystem: String): String
@native protected def _setCgroupItem(ptr: Long, subSystem: String, value: String): Boolean
// Native: LxcContainerConsole.c
@native protected def _console(ptr: Long): FileDescriptor
@native protected def _attach(ptr: Long): Process
@native protected def _attachRunWait(ptr: Long): Int
// Native: LxcContainerDevice.c
@native protected def _addDeviceNode(ptr: Long, srcPath: String, dstPath: String): Boolean
@native protected def _removeDeviceNode(ptr: Long, srcPath: String, dstPath: String): Boolean
}
|
magik6k/jLibLXC
|
src/main/scala/net/magik6k/jliblxc/natives/NativeLxcContainer.scala
|
Scala
|
mit
| 8,756 |
package com.twitter.finatra.kafkastreams.query
import com.twitter.finatra.kafkastreams.transformer.stores.internal.FinatraStoresGlobalManager
import com.twitter.finatra.streams.queryable.thrift.domain.ServiceShardId
import com.twitter.finatra.streams.queryable.thrift.partitioning.KafkaPartitioner
import com.twitter.finatra.streams.queryable.thrift.partitioning.StaticServiceShardPartitioner
import com.twitter.util.logging.Logging
import java.io.File
import org.apache.kafka.common.serialization.Serde
import org.apache.kafka.streams.errors.InvalidStateStoreException
import org.apache.kafka.streams.state.KeyValueIterator
/**
* A queryable Finatra key value store for use by endpoints exposing queryable state
*/
class QueryableFinatraKeyValueStore[PK, K, V](
stateDir: File,
storeName: String,
primaryKeySerde: Serde[PK],
numShards: Int,
numQueryablePartitions: Int,
currentShardId: Int)
extends Logging {
private val primaryKeySerializer = primaryKeySerde.serializer()
private val currentServiceShardId = ServiceShardId(currentShardId)
private val partitioner = new KafkaPartitioner(
StaticServiceShardPartitioner(numShards = numShards),
numPartitions = numQueryablePartitions
)
/**
* Get the value corresponding to this key.
*
* @param key The key to fetch
*
* @return The value or null if no value is found.
*
* @throws NullPointerException If null is used for key.
* @throws InvalidStateStoreException if the store is not initialized
*/
@throws[InvalidStateStoreException]
def get(primaryKey: PK, key: K): Option[V] = {
Option(
FinatraStoresGlobalManager
.getStore[K, V](stateDir, storeName, numQueryablePartitions, getPrimaryKeyBytes(primaryKey))
.get(key))
}
/**
* Get an iterator over a given range of keys. This iterator must be closed after use.
* The returned iterator must be safe from {@link java.util.ConcurrentModificationException}s
* and must not return null values. No ordering guarantees are provided.
*
* @param from The first key that could be in the range
* @param to The last key that could be in the range
*
* @return The iterator for this range.
*
* @throws NullPointerException If null is used for from or to.
* @throws InvalidStateStoreException if the store is not initialized
*/
def range(primaryKey: PK, from: K, to: K): KeyValueIterator[K, V] = {
FinatraStoresGlobalManager
.getStore[K, V](stateDir, storeName, numQueryablePartitions, getPrimaryKeyBytes(primaryKey))
.range(from, to)
}
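  /*
   * Usage sketch (illustrative only; the store name, directory, serde and shard numbers are
   * made-up values for the example, not taken from this file):
   * {{{
   * val store = new QueryableFinatraKeyValueStore[String, String, Long](
   *   stateDir = new File("/tmp/kafka-streams"),
   *   storeName = "CountsStore",
   *   primaryKeySerde = org.apache.kafka.common.serialization.Serdes.String(),
   *   numShards = 2,
   *   numQueryablePartitions = 2,
   *   currentShardId = 0)
   * // point lookup for a key owned by the local shard
   * val single: Option[Long] = store.get(primaryKey = "user123", key = "user123")
   * // range scan; the returned iterator must be closed after use
   * val iter = store.range(primaryKey = "user123", from = "a", to = "z")
   * try { while (iter.hasNext) println(iter.next()) } finally iter.close()
   * }}}
   */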
/* Private */
private def getPrimaryKeyBytes(primaryKey: PK): Array[Byte] = {
FinatraStoresGlobalManager.primaryKeyBytesIfLocalKey(
partitioner,
currentServiceShardId,
primaryKey,
primaryKeySerializer)
}
}
|
twitter/finatra
|
kafka-streams/kafka-streams/src/main/scala/com/twitter/finatra/kafkastreams/query/QueryableFinatraKeyValueStore.scala
|
Scala
|
apache-2.0
| 2,835 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.util
import java.time.{Duration, Period}
import java.util.concurrent.TimeUnit
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.plans.SQLHelper
import org.apache.spark.sql.catalyst.util.DateTimeConstants._
import org.apache.spark.sql.catalyst.util.DateTimeUtils.millisToMicros
import org.apache.spark.sql.catalyst.util.IntervalStringStyles.{ANSI_STYLE, HIVE_STYLE}
import org.apache.spark.sql.catalyst.util.IntervalUtils._
import org.apache.spark.sql.catalyst.util.IntervalUtils.IntervalUnit._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.DayTimeIntervalType
import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String}
class IntervalUtilsSuite extends SparkFunSuite with SQLHelper {
private def checkFromString(input: String, expected: CalendarInterval): Unit = {
assert(stringToInterval(UTF8String.fromString(input)) === expected)
assert(safeStringToInterval(UTF8String.fromString(input)) === expected)
}
private def checkFromInvalidString(input: String, errorMsg: String): Unit = {
failFuncWithInvalidInput(input, errorMsg, s => stringToInterval(UTF8String.fromString(s)))
assert(safeStringToInterval(UTF8String.fromString(input)) === null)
}
private def failFuncWithInvalidInput(
input: String, errorMsg: String, converter: String => CalendarInterval): Unit = {
withClue("Expected to throw an exception for the invalid input") {
val e = intercept[IllegalArgumentException](converter(input))
assert(e.getMessage.contains(errorMsg))
}
}
private def testSingleUnit(
unit: String, number: Int, months: Int, days: Int, microseconds: Long): Unit = {
for (prefix <- Seq("interval ", "")) {
val input1 = prefix + number + " " + unit
val input2 = prefix + number + " " + unit + "s"
val result = new CalendarInterval(months, days, microseconds)
checkFromString(input1, result)
checkFromString(input2, result)
}
}
test("string to interval: basic") {
testSingleUnit("YEAR", 3, 36, 0, 0)
testSingleUnit("Month", 3, 3, 0, 0)
testSingleUnit("Week", 3, 0, 21, 0)
testSingleUnit("DAY", 3, 0, 3, 0)
testSingleUnit("HouR", 3, 0, 0, 3 * MICROS_PER_HOUR)
testSingleUnit("MiNuTe", 3, 0, 0, 3 * MICROS_PER_MINUTE)
testSingleUnit("Second", 3, 0, 0, 3 * MICROS_PER_SECOND)
testSingleUnit("MilliSecond", 3, 0, 0, millisToMicros(3))
testSingleUnit("MicroSecond", 3, 0, 0, 3)
checkFromInvalidString(null, "cannot be null")
for (input <- Seq("", "interval", "foo", "foo 1 day")) {
checkFromInvalidString(input, "Error parsing")
}
}
test("string to interval: interval with dangling parts should not results null") {
checkFromInvalidString("+", "expect a number after '+' but hit EOL")
checkFromInvalidString("-", "expect a number after '-' but hit EOL")
checkFromInvalidString("+ 2", "expect a unit name after '2' but hit EOL")
checkFromInvalidString("- 1", "expect a unit name after '1' but hit EOL")
checkFromInvalidString("1", "expect a unit name after '1' but hit EOL")
checkFromInvalidString("1.2", "expect a unit name after '1.2' but hit EOL")
checkFromInvalidString("1 day 2", "expect a unit name after '2' but hit EOL")
checkFromInvalidString("1 day 2.2", "expect a unit name after '2.2' but hit EOL")
checkFromInvalidString("1 day -", "expect a number after '-' but hit EOL")
checkFromInvalidString("-.", "expect a unit name after '-.' but hit EOL")
}
test("string to interval: multiple units") {
Seq(
"-1 MONTH 1 day -1 microseconds" -> new CalendarInterval(-1, 1, -1),
" 123 MONTHS 123 DAYS 123 Microsecond " -> new CalendarInterval(123, 123, 123),
"interval -1 day +3 Microseconds" -> new CalendarInterval(0, -1, 3),
"interval - 1 day + 3 Microseconds" -> new CalendarInterval(0, -1, 3),
" interval 8 years -11 months 123 weeks -1 day " +
"23 hours -22 minutes 1 second -123 millisecond 567 microseconds " ->
new CalendarInterval(85, 860, 81480877567L)).foreach { case (input, expected) =>
checkFromString(input, expected)
}
}
test("string to interval: special cases") {
// Support any order of interval units
checkFromString("1 day 1 year", new CalendarInterval(12, 1, 0))
// Allow duplicated units and summarize their values
checkFromString("1 day 10 day", new CalendarInterval(0, 11, 0))
// Only the seconds units can have the fractional part
checkFromInvalidString("1.5 days", "'days' cannot have fractional part")
checkFromInvalidString("1. hour", "'hour' cannot have fractional part")
checkFromInvalidString("1 hourX", "invalid unit 'hourx'")
checkFromInvalidString("~1 hour", "unrecognized number '~1'")
checkFromInvalidString("1 Mour", "invalid unit 'mour'")
checkFromInvalidString("1 aour", "invalid unit 'aour'")
checkFromInvalidString("1a1 hour", "invalid value '1a1'")
checkFromInvalidString("1.1a1 seconds", "invalid value '1.1a1'")
checkFromInvalidString("2234567890 days", "integer overflow")
checkFromInvalidString(". seconds", "invalid value '.'")
}
test("string to interval: whitespaces") {
checkFromInvalidString(" ", "Error parsing ' ' to interval")
checkFromInvalidString("\\n", "Error parsing '\\n' to interval")
checkFromInvalidString("\\t", "Error parsing '\\t' to interval")
checkFromString("1 \\t day \\n 2 \\r hour", new CalendarInterval(0, 1, 2 * MICROS_PER_HOUR))
checkFromInvalidString("interval1 \\t day \\n 2 \\r hour", "invalid interval prefix interval1")
checkFromString("interval\\r1\\tday", new CalendarInterval(0, 1, 0))
// scalastyle:off nonascii
checkFromInvalidString("中国 interval 1 day", "unrecognized number '中国'")
checkFromInvalidString("interval浙江 1 day", "invalid interval prefix interval浙江")
checkFromInvalidString("interval 1杭州 day", "invalid value '1杭州'")
checkFromInvalidString("interval 1 滨江day", "invalid unit '滨江day'")
checkFromInvalidString("interval 1 day长河", "invalid unit 'day长河'")
checkFromInvalidString("interval 1 day 网商路", "unrecognized number '网商路'")
// scalastyle:on nonascii
}
test("string to interval: seconds with fractional part") {
checkFromString("0.1 seconds", new CalendarInterval(0, 0, 100000))
checkFromString("1. seconds", new CalendarInterval(0, 0, 1000000))
checkFromString("123.001 seconds", new CalendarInterval(0, 0, 123001000))
checkFromString("1.001001 seconds", new CalendarInterval(0, 0, 1001001))
checkFromString("1 minute 1.001001 seconds", new CalendarInterval(0, 0, 61001001))
checkFromString("-1.5 seconds", new CalendarInterval(0, 0, -1500000))
// truncate nanoseconds to microseconds
checkFromString("0.999999999 seconds", new CalendarInterval(0, 0, 999999))
checkFromString(".999999999 seconds", new CalendarInterval(0, 0, 999999))
checkFromInvalidString("0.123456789123 seconds", "'0.123456789123' is out of range")
}
test("from year-month string") {
assert(fromYearMonthString("99-10") === new CalendarInterval(99 * 12 + 10, 0, 0L))
assert(fromYearMonthString("+99-10") === new CalendarInterval(99 * 12 + 10, 0, 0L))
assert(fromYearMonthString("-8-10") === new CalendarInterval(-8 * 12 - 10, 0, 0L))
failFuncWithInvalidInput("99-15", "month 15 outside range", fromYearMonthString)
failFuncWithInvalidInput("9a9-15", "Interval string does not match year-month format",
fromYearMonthString)
// whitespaces
assert(fromYearMonthString("99-10 ") === new CalendarInterval(99 * 12 + 10, 0, 0L))
assert(fromYearMonthString("+99-10\\t") === new CalendarInterval(99 * 12 + 10, 0, 0L))
assert(fromYearMonthString("\\t\\t-8-10\\t") === new CalendarInterval(-8 * 12 - 10, 0, 0L))
failFuncWithInvalidInput("99\\t-15", "Interval string does not match year-month format",
fromYearMonthString)
failFuncWithInvalidInput("-\\t99-15", "Interval string does not match year-month format",
fromYearMonthString)
assert(fromYearMonthString("178956970-6") == new CalendarInterval(Int.MaxValue - 1, 0, 0))
assert(fromYearMonthString("178956970-7") == new CalendarInterval(Int.MaxValue, 0, 0))
val e1 = intercept[IllegalArgumentException]{
assert(fromYearMonthString("178956970-8") == new CalendarInterval(Int.MinValue, 0, 0))
}.getMessage
assert(e1.contains("integer overflow"))
assert(fromYearMonthString("-178956970-8") == new CalendarInterval(Int.MinValue, 0, 0))
val e2 = intercept[IllegalArgumentException]{
assert(fromYearMonthString("-178956970-9") == new CalendarInterval(Int.MinValue, 0, 0))
}.getMessage
assert(e2.contains("integer overflow"))
}
test("from day-time string - legacy") {
withSQLConf(SQLConf.LEGACY_FROM_DAYTIME_STRING.key -> "true") {
assert(fromDayTimeString("5 12:40:30.999999999") ===
new CalendarInterval(
0,
5,
12 * MICROS_PER_HOUR +
40 * MICROS_PER_MINUTE +
30 * MICROS_PER_SECOND + 999999L))
assert(fromDayTimeString("10 0:12:0.888") ===
new CalendarInterval(
0,
10,
12 * MICROS_PER_MINUTE + millisToMicros(888)))
assert(fromDayTimeString("-3 0:0:0") === new CalendarInterval(0, -3, 0L))
failFuncWithInvalidInput("5 30:12:20", "hour 30 outside range", fromDayTimeString)
failFuncWithInvalidInput("5 30-12", "must match day-time format", fromDayTimeString)
failFuncWithInvalidInput("5 1:12:20", "Cannot support (interval",
s => fromDayTimeString(s, HOUR, MICROSECOND))
}
}
test("interval duration") {
def duration(s: String, unit: TimeUnit, daysPerMonth: Int): Long = {
IntervalUtils.getDuration(stringToInterval(UTF8String.fromString(s)), unit, daysPerMonth)
}
assert(duration("0 seconds", TimeUnit.MILLISECONDS, 31) === 0)
assert(duration("1 month", TimeUnit.DAYS, 31) === 31)
assert(duration("1 microsecond", TimeUnit.MICROSECONDS, 30) === 1)
assert(duration("1 month -30 days", TimeUnit.DAYS, 31) === 1)
val e = intercept[ArithmeticException] {
duration(Integer.MAX_VALUE + " month", TimeUnit.SECONDS, 31)
}
assert(e.getMessage.contains("overflow"))
}
test("negative interval") {
def isNegative(s: String, daysPerMonth: Int): Boolean = {
IntervalUtils.isNegative(stringToInterval(UTF8String.fromString(s)), daysPerMonth)
}
assert(isNegative("-1 months", 28))
assert(isNegative("-1 microsecond", 30))
assert(isNegative("-1 month 30 days", 31))
assert(isNegative("2 months -61 days", 30))
assert(isNegative("-1 year -2 seconds", 30))
assert(!isNegative("0 months", 28))
assert(!isNegative("1 year -360 days", 31))
assert(!isNegative("-1 year 380 days", 31))
}
test("negate") {
assert(negateExact(new CalendarInterval(1, 2, 3)) === new CalendarInterval(-1, -2, -3))
assert(negate(new CalendarInterval(1, 2, 3)) === new CalendarInterval(-1, -2, -3))
}
test("subtract one interval by another") {
val input1 = new CalendarInterval(3, 1, 1 * MICROS_PER_HOUR)
val input2 = new CalendarInterval(2, 4, 100 * MICROS_PER_HOUR)
val input3 = new CalendarInterval(-10, -30, -81 * MICROS_PER_HOUR)
val input4 = new CalendarInterval(75, 150, 200 * MICROS_PER_HOUR)
Seq[(CalendarInterval, CalendarInterval) => CalendarInterval](subtractExact, subtract)
.foreach { func =>
assert(new CalendarInterval(1, -3, -99 * MICROS_PER_HOUR) === func(input1, input2))
assert(new CalendarInterval(-85, -180, -281 * MICROS_PER_HOUR) === func(input3, input4))
}
}
test("add two intervals") {
val input1 = new CalendarInterval(3, 1, 1 * MICROS_PER_HOUR)
val input2 = new CalendarInterval(2, 4, 100 * MICROS_PER_HOUR)
val input3 = new CalendarInterval(-10, -30, -81 * MICROS_PER_HOUR)
val input4 = new CalendarInterval(75, 150, 200 * MICROS_PER_HOUR)
Seq[(CalendarInterval, CalendarInterval) => CalendarInterval](addExact, add).foreach { func =>
assert(new CalendarInterval(5, 5, 101 * MICROS_PER_HOUR) === func(input1, input2))
assert(new CalendarInterval(65, 120, 119 * MICROS_PER_HOUR) === func(input3, input4))
}
}
test("multiply by num") {
Seq[(CalendarInterval, Double) => CalendarInterval](multiply, multiplyExact).foreach { func =>
var interval = new CalendarInterval(0, 0, 0)
assert(interval === func(interval, 0))
interval = new CalendarInterval(123, 456, 789)
assert(new CalendarInterval(123 * 42, 456 * 42, 789 * 42) === func(interval, 42))
interval = new CalendarInterval(-123, -456, -789)
assert(new CalendarInterval(-123 * 42, -456 * 42, -789 * 42) === func(interval, 42))
interval = new CalendarInterval(1, 5, 0)
assert(new CalendarInterval(1, 7, 12 * MICROS_PER_HOUR) === func(interval, 1.5))
interval = new CalendarInterval(2, 2, 2 * MICROS_PER_HOUR)
assert(new CalendarInterval(2, 2, 12 * MICROS_PER_HOUR) === func(interval, 1.2))
}
val interval = new CalendarInterval(2, 0, 0)
assert(multiply(interval, Integer.MAX_VALUE) === new CalendarInterval(Int.MaxValue, 0, 0))
val e = intercept[ArithmeticException](multiplyExact(interval, Integer.MAX_VALUE))
assert(e.getMessage.contains("overflow"))
}
test("divide by num") {
Seq[(CalendarInterval, Double) => CalendarInterval](divide, divideExact).foreach { func =>
var interval = new CalendarInterval(0, 0, 0)
assert(interval === func(interval, 10))
interval = new CalendarInterval(1, 3, 30 * MICROS_PER_SECOND)
assert(new CalendarInterval(0, 1, 12 * MICROS_PER_HOUR + 15 * MICROS_PER_SECOND) ===
func(interval, 2))
assert(new CalendarInterval(2, 6, MICROS_PER_MINUTE) === func(interval, 0.5))
interval = new CalendarInterval(-1, 0, -30 * MICROS_PER_SECOND)
assert(new CalendarInterval(0, 0, -15 * MICROS_PER_SECOND) === func(interval, 2))
assert(new CalendarInterval(-2, 0, -MICROS_PER_MINUTE) === func(interval, 0.5))
}
var interval = new CalendarInterval(Int.MaxValue, Int.MaxValue, 0)
assert(divide(interval, 0.9) === new CalendarInterval(Int.MaxValue, Int.MaxValue,
((Int.MaxValue / 9.0) * MICROS_PER_DAY).round))
val e1 = intercept[ArithmeticException](divideExact(interval, 0.9))
assert(e1.getMessage.contains("integer overflow"))
interval = new CalendarInterval(123, 456, 789)
assert(divide(interval, 0) === null)
val e2 = intercept[ArithmeticException](divideExact(interval, 0))
assert(e2.getMessage.contains("divide by zero"))
}
test("from day-time string") {
def check(input: String, from: IntervalUnit, to: IntervalUnit, expected: String): Unit = {
withClue(s"from = $from, to = $to") {
val expectedUtf8 = UTF8String.fromString(expected)
assert(fromDayTimeString(input, from, to) === safeStringToInterval(expectedUtf8))
}
}
def checkFail(input: String, from: IntervalUnit, to: IntervalUnit, errMsg: String): Unit = {
failFuncWithInvalidInput(input, errMsg, s => fromDayTimeString(s, from, to))
}
check("12:40", HOUR, MINUTE, "12 hours 40 minutes")
check("+12:40", HOUR, MINUTE, "12 hours 40 minutes")
check("-12:40", HOUR, MINUTE, "-12 hours -40 minutes")
checkFail("5 12:40", HOUR, MINUTE, "must match day-time format")
check("12:40:30.999999999", HOUR, SECOND, "12 hours 40 minutes 30.999999 seconds")
check("+12:40:30.123456789", HOUR, SECOND, "12 hours 40 minutes 30.123456 seconds")
check("-12:40:30.123456789", HOUR, SECOND, "-12 hours -40 minutes -30.123456 seconds")
checkFail("5 12:40:30", HOUR, SECOND, "must match day-time format")
checkFail("12:40:30.0123456789", HOUR, SECOND, "must match day-time format")
check("40:30.123456789", MINUTE, SECOND, "40 minutes 30.123456 seconds")
check("+40:30.123456789", MINUTE, SECOND, "40 minutes 30.123456 seconds")
check("-40:30.123456789", MINUTE, SECOND, "-40 minutes -30.123456 seconds")
checkFail("12:40:30", MINUTE, SECOND, "must match day-time format")
check("5 12", DAY, HOUR, "5 days 12 hours")
check("+5 12", DAY, HOUR, "5 days 12 hours")
check("-5 12", DAY, HOUR, "-5 days -12 hours")
checkFail("5 12:30", DAY, HOUR, "must match day-time format")
check("5 12:40", DAY, MINUTE, "5 days 12 hours 40 minutes")
check("+5 12:40", DAY, MINUTE, "5 days 12 hours 40 minutes")
check("-5 12:40", DAY, MINUTE, "-5 days -12 hours -40 minutes")
checkFail("5 12", DAY, MINUTE, "must match day-time format")
check("5 12:40:30.123", DAY, SECOND, "5 days 12 hours 40 minutes 30.123 seconds")
check("+5 12:40:30.123456", DAY, SECOND, "5 days 12 hours 40 minutes 30.123456 seconds")
check("-5 12:40:30.123456789", DAY, SECOND, "-5 days -12 hours -40 minutes -30.123456 seconds")
checkFail("5 12", DAY, SECOND, "must match day-time format")
checkFail("5 30:12:20", DAY, SECOND, "hour 30 outside range")
checkFail("5 30-12", DAY, SECOND, "must match day-time format")
checkFail("5 1:12:20", HOUR, MICROSECOND, "Cannot support (interval")
// whitespaces
    check("\t +5 12:40\t ", DAY, MINUTE, "5 days 12 hours 40 minutes")
    checkFail("+5\t 12:40", DAY, MINUTE, "must match day-time format")
}
test("interval overflow check") {
val maxMonth = new CalendarInterval(Int.MaxValue, 0, 0)
val minMonth = new CalendarInterval(Int.MinValue, 0, 0)
val oneMonth = new CalendarInterval(1, 0, 0)
val maxDay = new CalendarInterval(0, Int.MaxValue, 0)
val minDay = new CalendarInterval(0, Int.MinValue, 0)
val oneDay = new CalendarInterval(0, 1, 0)
val maxMicros = new CalendarInterval(0, 0, Long.MaxValue)
val minMicros = new CalendarInterval(0, 0, Long.MinValue)
val oneMicros = new CalendarInterval(0, 0, 1)
intercept[ArithmeticException](negateExact(minMonth))
assert(negate(minMonth) === minMonth)
intercept[ArithmeticException](addExact(maxMonth, oneMonth))
intercept[ArithmeticException](addExact(maxDay, oneDay))
intercept[ArithmeticException](addExact(maxMicros, oneMicros))
assert(add(maxMonth, oneMonth) === minMonth)
assert(add(maxDay, oneDay) === minDay)
assert(add(maxMicros, oneMicros) === minMicros)
intercept[ArithmeticException](subtractExact(minDay, oneDay))
intercept[ArithmeticException](subtractExact(minMonth, oneMonth))
intercept[ArithmeticException](subtractExact(minMicros, oneMicros))
assert(subtract(minMonth, oneMonth) === maxMonth)
assert(subtract(minDay, oneDay) === maxDay)
assert(subtract(minMicros, oneMicros) === maxMicros)
intercept[ArithmeticException](multiplyExact(maxMonth, 2))
intercept[ArithmeticException](divideExact(maxDay, 0.5))
}
test("SPARK-34605: microseconds to duration") {
assert(microsToDuration(0).isZero)
assert(microsToDuration(-1).toNanos === -1000)
assert(microsToDuration(1).toNanos === 1000)
assert(microsToDuration(Long.MaxValue).toDays === 106751991)
assert(microsToDuration(Long.MinValue).toDays === -106751991)
}
test("SPARK-34605: duration to microseconds") {
assert(durationToMicros(Duration.ZERO) === 0)
assert(durationToMicros(Duration.ofSeconds(-1)) === -1000000)
assert(durationToMicros(Duration.ofNanos(123456)) === 123)
assert(durationToMicros(Duration.ofDays(106751991)) ===
(Long.MaxValue / MICROS_PER_DAY) * MICROS_PER_DAY)
val errMsg = intercept[ArithmeticException] {
durationToMicros(Duration.ofDays(106751991 + 1))
}.getMessage
assert(errMsg.contains("long overflow"))
}
test("SPARK-34615: period to months") {
assert(periodToMonths(Period.ZERO) === 0)
assert(periodToMonths(Period.of(0, -1, 0)) === -1)
assert(periodToMonths(Period.of(-1, 0, 10)) === -12) // ignore days
assert(periodToMonths(Period.of(178956970, 7, 0)) === Int.MaxValue)
assert(periodToMonths(Period.of(-178956970, -8, 123)) === Int.MinValue)
assert(periodToMonths(Period.of(0, Int.MaxValue, Int.MaxValue)) === Int.MaxValue)
val errMsg = intercept[ArithmeticException] {
periodToMonths(Period.of(Int.MaxValue, 0, 0))
}.getMessage
assert(errMsg.contains("integer overflow"))
}
test("SPARK-34615: months to period") {
assert(monthsToPeriod(0) === Period.ZERO)
assert(monthsToPeriod(-11) === Period.of(0, -11, 0))
assert(monthsToPeriod(11) === Period.of(0, 11, 0))
assert(monthsToPeriod(27) === Period.of(2, 3, 0))
assert(monthsToPeriod(-13) === Period.of(-1, -1, 0))
assert(monthsToPeriod(Int.MaxValue) === Period.ofYears(178956970).withMonths(7))
assert(monthsToPeriod(Int.MinValue) === Period.ofYears(-178956970).withMonths(-8))
}
test("SPARK-34695: round trip conversion of micros -> duration -> micros") {
Seq(
0,
MICROS_PER_SECOND - 1,
-MICROS_PER_SECOND + 1,
MICROS_PER_SECOND,
-MICROS_PER_SECOND,
Long.MaxValue - MICROS_PER_SECOND,
Long.MinValue + MICROS_PER_SECOND,
Long.MaxValue,
Long.MinValue).foreach { micros =>
val duration = microsToDuration(micros)
assert(durationToMicros(duration) === micros)
}
}
test("SPARK-34715: Add round trip tests for period <-> month and duration <-> micros") {
// Months -> Period -> Months
Seq(
0,
MONTHS_PER_YEAR - 1,
MONTHS_PER_YEAR + 1,
MONTHS_PER_YEAR,
-MONTHS_PER_YEAR,
Int.MaxValue - MONTHS_PER_YEAR,
Int.MinValue + MONTHS_PER_YEAR,
Int.MaxValue,
Int.MinValue).foreach { months =>
val period = monthsToPeriod(months)
assert(periodToMonths(period) === months)
}
// Period -> Months -> Period
Seq(
monthsToPeriod(0),
monthsToPeriod(MONTHS_PER_YEAR - 1),
monthsToPeriod(MONTHS_PER_YEAR + 1),
monthsToPeriod(MONTHS_PER_YEAR),
monthsToPeriod(-MONTHS_PER_YEAR),
monthsToPeriod(Int.MaxValue - MONTHS_PER_YEAR),
monthsToPeriod(Int.MinValue + MONTHS_PER_YEAR),
monthsToPeriod(Int.MaxValue),
monthsToPeriod(Int.MinValue)).foreach { period =>
val months = periodToMonths(period)
assert(monthsToPeriod(months) === period)
}
// Duration -> micros -> Duration
Seq(
microsToDuration(0),
microsToDuration(MICROS_PER_SECOND - 1),
microsToDuration(-MICROS_PER_SECOND + 1),
microsToDuration(MICROS_PER_SECOND),
microsToDuration(-MICROS_PER_SECOND),
microsToDuration(Long.MaxValue - MICROS_PER_SECOND),
microsToDuration(Long.MinValue + MICROS_PER_SECOND),
microsToDuration(Long.MaxValue),
microsToDuration(Long.MinValue)).foreach { duration =>
val micros = durationToMicros(duration)
assert(microsToDuration(micros) === duration)
}
}
test("SPARK-35016: format year-month intervals") {
import org.apache.spark.sql.types.YearMonthIntervalType._
Seq(
0 -> ("0-0", "INTERVAL '0-0' YEAR TO MONTH"),
-11 -> ("-0-11", "INTERVAL '-0-11' YEAR TO MONTH"),
11 -> ("0-11", "INTERVAL '0-11' YEAR TO MONTH"),
-13 -> ("-1-1", "INTERVAL '-1-1' YEAR TO MONTH"),
13 -> ("1-1", "INTERVAL '1-1' YEAR TO MONTH"),
-24 -> ("-2-0", "INTERVAL '-2-0' YEAR TO MONTH"),
24 -> ("2-0", "INTERVAL '2-0' YEAR TO MONTH"),
Int.MinValue -> ("-178956970-8", "INTERVAL '-178956970-8' YEAR TO MONTH"),
Int.MaxValue -> ("178956970-7", "INTERVAL '178956970-7' YEAR TO MONTH")
).foreach { case (months, (hiveIntervalStr, ansiIntervalStr)) =>
assert(toYearMonthIntervalString(months, ANSI_STYLE, YEAR, MONTH) === ansiIntervalStr)
assert(toYearMonthIntervalString(months, HIVE_STYLE, YEAR, MONTH) === hiveIntervalStr)
}
}
test("SPARK-35016: format day-time intervals") {
import DayTimeIntervalType._
Seq(
0L -> ("0 00:00:00.000000000", "INTERVAL '0 00:00:00' DAY TO SECOND"),
-1L -> ("-0 00:00:00.000001000", "INTERVAL '-0 00:00:00.000001' DAY TO SECOND"),
10 * MICROS_PER_MILLIS -> ("0 00:00:00.010000000", "INTERVAL '0 00:00:00.01' DAY TO SECOND"),
(-123 * MICROS_PER_DAY - 3 * MICROS_PER_SECOND) ->
("-123 00:00:03.000000000", "INTERVAL '-123 00:00:03' DAY TO SECOND"),
Long.MinValue -> ("-106751991 04:00:54.775808000",
"INTERVAL '-106751991 04:00:54.775808' DAY TO SECOND")
).foreach { case (micros, (hiveIntervalStr, ansiIntervalStr)) =>
assert(toDayTimeIntervalString(micros, ANSI_STYLE, DAY, SECOND) === ansiIntervalStr)
assert(toDayTimeIntervalString(micros, HIVE_STYLE, DAY, SECOND) === hiveIntervalStr)
}
}
test("SPARK-35734: Format day-time intervals using type fields") {
import DayTimeIntervalType._
Seq(
0L ->
("INTERVAL '0 00:00:00' DAY TO SECOND",
"INTERVAL '0 00:00' DAY TO MINUTE",
"INTERVAL '0 00' DAY TO HOUR",
"INTERVAL '00:00:00' HOUR TO SECOND",
"INTERVAL '00:00' HOUR TO MINUTE",
"INTERVAL '00:00' MINUTE TO SECOND",
"INTERVAL '0' DAY",
"INTERVAL '00' HOUR",
"INTERVAL '00' MINUTE",
"INTERVAL '00' SECOND"),
-1L ->
("INTERVAL '-0 00:00:00.000001' DAY TO SECOND",
"INTERVAL '-0 00:00' DAY TO MINUTE",
"INTERVAL '-0 00' DAY TO HOUR",
"INTERVAL '-00:00:00.000001' HOUR TO SECOND",
"INTERVAL '-00:00' HOUR TO MINUTE",
"INTERVAL '-00:00.000001' MINUTE TO SECOND",
"INTERVAL '-0' DAY",
"INTERVAL '-00' HOUR",
"INTERVAL '-00' MINUTE",
"INTERVAL '-00.000001' SECOND"),
10 * MICROS_PER_MILLIS ->
("INTERVAL '0 00:00:00.01' DAY TO SECOND",
"INTERVAL '0 00:00' DAY TO MINUTE",
"INTERVAL '0 00' DAY TO HOUR",
"INTERVAL '00:00:00.01' HOUR TO SECOND",
"INTERVAL '00:00' HOUR TO MINUTE",
"INTERVAL '00:00.01' MINUTE TO SECOND",
"INTERVAL '0' DAY",
"INTERVAL '00' HOUR",
"INTERVAL '00' MINUTE",
"INTERVAL '00.01' SECOND"),
(-123 * MICROS_PER_DAY - 3 * MICROS_PER_SECOND) ->
("INTERVAL '-123 00:00:03' DAY TO SECOND",
"INTERVAL '-123 00:00' DAY TO MINUTE",
"INTERVAL '-123 00' DAY TO HOUR",
"INTERVAL '-2952:00:03' HOUR TO SECOND",
"INTERVAL '-2952:00' HOUR TO MINUTE",
"INTERVAL '-177120:03' MINUTE TO SECOND",
"INTERVAL '-123' DAY",
"INTERVAL '-2952' HOUR",
"INTERVAL '-177120' MINUTE",
"INTERVAL '-10627203' SECOND"),
Long.MinValue ->
("INTERVAL '-106751991 04:00:54.775808' DAY TO SECOND",
"INTERVAL '-106751991 04:00' DAY TO MINUTE",
"INTERVAL '-106751991 04' DAY TO HOUR",
"INTERVAL '-2562047788:00:54.775808' HOUR TO SECOND",
"INTERVAL '-2562047788:00' HOUR TO MINUTE",
"INTERVAL '-153722867280:54.775808' MINUTE TO SECOND",
"INTERVAL '-106751991' DAY",
"INTERVAL '-2562047788' HOUR",
"INTERVAL '-153722867280' MINUTE",
"INTERVAL '-9223372036854.775808' SECOND"),
69159782123456L ->
("INTERVAL '800 11:03:02.123456' DAY TO SECOND",
"INTERVAL '800 11:03' DAY TO MINUTE",
"INTERVAL '800 11' DAY TO HOUR",
"INTERVAL '19211:03:02.123456' HOUR TO SECOND",
"INTERVAL '19211:03' HOUR TO MINUTE",
"INTERVAL '1152663:02.123456' MINUTE TO SECOND",
"INTERVAL '800' DAY",
"INTERVAL '19211' HOUR",
"INTERVAL '1152663' MINUTE",
"INTERVAL '69159782.123456' SECOND"),
-69159782123456L ->
("INTERVAL '-800 11:03:02.123456' DAY TO SECOND",
"INTERVAL '-800 11:03' DAY TO MINUTE",
"INTERVAL '-800 11' DAY TO HOUR",
"INTERVAL '-19211:03:02.123456' HOUR TO SECOND",
"INTERVAL '-19211:03' HOUR TO MINUTE",
"INTERVAL '-1152663:02.123456' MINUTE TO SECOND",
"INTERVAL '-800' DAY",
"INTERVAL '-19211' HOUR",
"INTERVAL '-1152663' MINUTE",
"INTERVAL '-69159782.123456' SECOND")
).foreach {
case (
micros, (
dayToSec,
dayToMinute,
dayToHour,
hourToSec,
hourToMinute,
minuteToSec,
day,
hour,
minute,
sec)) =>
assert(toDayTimeIntervalString(micros, ANSI_STYLE, DAY, SECOND) === dayToSec)
assert(toDayTimeIntervalString(micros, ANSI_STYLE, DAY, MINUTE) === dayToMinute)
assert(toDayTimeIntervalString(micros, ANSI_STYLE, DAY, HOUR) === dayToHour)
assert(toDayTimeIntervalString(micros, ANSI_STYLE, HOUR, SECOND) === hourToSec)
assert(toDayTimeIntervalString(micros, ANSI_STYLE, HOUR, MINUTE) === hourToMinute)
assert(toDayTimeIntervalString(micros, ANSI_STYLE, MINUTE, SECOND) === minuteToSec)
assert(toDayTimeIntervalString(micros, ANSI_STYLE, DAY, DAY) === day)
assert(toDayTimeIntervalString(micros, ANSI_STYLE, HOUR, HOUR) === hour)
assert(toDayTimeIntervalString(micros, ANSI_STYLE, MINUTE, MINUTE) === minute)
assert(toDayTimeIntervalString(micros, ANSI_STYLE, SECOND, SECOND) === sec)
}
}
test("SPARK-35771: Format year-month intervals using type fields") {
import org.apache.spark.sql.types.YearMonthIntervalType._
Seq(
0 ->
("INTERVAL '0-0' YEAR TO MONTH", "INTERVAL '0' YEAR", "INTERVAL '0' MONTH"),
-11 -> ("INTERVAL '-0-11' YEAR TO MONTH", "INTERVAL '-0' YEAR", "INTERVAL '-11' MONTH"),
11 -> ("INTERVAL '0-11' YEAR TO MONTH", "INTERVAL '0' YEAR", "INTERVAL '11' MONTH"),
-13 -> ("INTERVAL '-1-1' YEAR TO MONTH", "INTERVAL '-1' YEAR", "INTERVAL '-13' MONTH"),
13 -> ("INTERVAL '1-1' YEAR TO MONTH", "INTERVAL '1' YEAR", "INTERVAL '13' MONTH"),
-24 -> ("INTERVAL '-2-0' YEAR TO MONTH", "INTERVAL '-2' YEAR", "INTERVAL '-24' MONTH"),
24 -> ("INTERVAL '2-0' YEAR TO MONTH", "INTERVAL '2' YEAR", "INTERVAL '24' MONTH"),
Int.MinValue ->
("INTERVAL '-178956970-8' YEAR TO MONTH",
"INTERVAL '-178956970' YEAR",
"INTERVAL '-2147483648' MONTH"),
Int.MaxValue ->
("INTERVAL '178956970-7' YEAR TO MONTH",
"INTERVAL '178956970' YEAR",
"INTERVAL '2147483647' MONTH")
).foreach { case (months, (yearToMonth, year, month)) =>
assert(toYearMonthIntervalString(months, ANSI_STYLE, YEAR, MONTH) === yearToMonth)
assert(toYearMonthIntervalString(months, ANSI_STYLE, YEAR, YEAR) === year)
assert(toYearMonthIntervalString(months, ANSI_STYLE, MONTH, MONTH) === month)
}
}
}
|
wangmiao1981/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/IntervalUtilsSuite.scala
|
Scala
|
apache-2.0
| 31,570 |
package io.flow.lint
import io.apibuilder.spec.v0.models._
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
class EventModelsSpec extends AnyFunSpec with Matchers {
private[this] val linter = linters.EventModels
private[this] def buildService(
fields: Seq[String],
attributes: Seq[Attribute] = Nil
): Service = {
Services.Base.copy(
models = Seq(
Services.buildModel(
name = "org_upserted",
fields = fields.map { case (name) =>
val typ = name match {
case "timestamp" => "date-time-iso8601"
case _ => "string"
}
Services.buildField(name = name, `type` = typ)
}
).copy(
attributes = attributes
)
)
)
}
it("respects linter ignore hint") {
linter.validate(
buildService(
fields = Seq("id"),
attributes = Seq(
Services.buildLinterIgnoreAttribute(Seq("event_model"))
)
)
) should be(Nil)
}
it("no-op w/out event_id") {
linter.validate(buildService(Seq("id", "email"))) should be(
Seq(
"Model org_upserted: event_id must be the first field in event models",
"Model org_upserted: timestamp field is required in event models"
)
)
}
it("fields") {
linter.validate(buildService(Seq("event_id"))) should be(Seq(
"Model org_upserted: timestamp field is required in event models"
))
linter.validate(buildService(Seq("event_id", "foo", "timestamp"))) should be(Seq(
"Model org_upserted: timestamp field must come after event_id in event models"
))
linter.validate(buildService(Seq("event_id", "timestamp"))) should be(Nil)
linter.validate(buildService(Seq("event_id", "timestamp", "number"))) should be(Seq(
"Model org_upserted: organization field is required if event model has a field named number"
))
linter.validate(buildService(Seq("event_id", "timestamp", "organization"))) should be(Nil)
linter.validate(buildService(Seq("event_id", "timestamp", "foo", "organization"))) should be(Seq(
"Model org_upserted: organization field must come after timestamp in event models"
))
linter.validate(buildService(Seq("event_id", "timestamp", "id", "foo", "organization"))) should be(Seq(
"Model org_upserted: organization field must come after id in event models"
))
linter.validate(buildService(Seq("event_id", "timestamp", "id", "organization", "number"))) should be(Nil)
linter.validate(buildService(Seq("event_id", "timestamp", "id", "organization", "number", "foo"))) should be(Nil)
linter.validate(buildService(Seq("event_id", "timestamp", "id", "organization", "foo", "number"))) should be(Seq(
"Model org_upserted: number field must come after organization in event models"
))
linter.validate(buildService(Seq("event_id", "timestamp", "organization", "number"))) should be(Nil)
linter.validate(buildService(Seq("event_id", "timestamp", "organization", "number", "foo"))) should be(Nil)
linter.validate(buildService(Seq("event_id", "timestamp", "organization", "foo", "number"))) should be(Seq(
"Model org_upserted: number field must come after organization in event models"
))
}
}
|
flowcommerce/api-lint
|
src/test/scala/io/flow/lint/EventModelsSpec.scala
|
Scala
|
mit
| 3,313 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding {
import cascading.operation._
import cascading.tuple._
import cascading.flow._
import cascading.pipe.assembly.AggregateBy
import cascading.pipe._
import com.twitter.chill.MeatLocker
import scala.collection.JavaConverters._
import org.apache.hadoop.conf.Configuration
  import com.esotericsoftware.kryo.Kryo
import com.twitter.algebird.{ Semigroup, SummingCache }
import com.twitter.scalding.mathematics.Poisson
import serialization.Externalizer
trait ScaldingPrepare[C] extends Operation[C] {
abstract override def prepare(flowProcess: FlowProcess[_], operationCall: OperationCall[C]) {
RuntimeStats.addFlowProcess(flowProcess)
super.prepare(flowProcess, operationCall)
}
}
class FlatMapFunction[S, T](@transient fn: S => TraversableOnce[T], fields: Fields,
conv: TupleConverter[S], set: TupleSetter[T])
extends BaseOperation[Any](fields) with Function[Any] with ScaldingPrepare[Any] {
val lockedFn = Externalizer(fn)
def operate(flowProcess: FlowProcess[_], functionCall: FunctionCall[Any]) {
lockedFn.get(conv(functionCall.getArguments)).foreach { arg: T =>
val this_tup = set(arg)
functionCall.getOutputCollector.add(this_tup)
}
}
}
class MapFunction[S, T](@transient fn: S => T, fields: Fields,
conv: TupleConverter[S], set: TupleSetter[T])
extends BaseOperation[Any](fields) with Function[Any] with ScaldingPrepare[Any] {
val lockedFn = Externalizer(fn)
def operate(flowProcess: FlowProcess[_], functionCall: FunctionCall[Any]) {
val res = lockedFn.get(conv(functionCall.getArguments))
functionCall.getOutputCollector.add(set(res))
}
}
/*
The IdentityFunction puts empty nodes in the cascading graph. We use these to nudge the cascading planner
in some edge cases.
*/
object IdentityFunction
extends BaseOperation[Any](Fields.ALL) with Function[Any] with ScaldingPrepare[Any] {
def operate(flowProcess: FlowProcess[_], functionCall: FunctionCall[Any]) {
functionCall.getOutputCollector.add(functionCall.getArguments)
}
}
class CollectFunction[S, T](@transient fn: PartialFunction[S, T], fields: Fields,
conv: TupleConverter[S], set: TupleSetter[T])
extends BaseOperation[Any](fields) with Function[Any] with ScaldingPrepare[Any] {
val lockedFn = Externalizer(fn)
def operate(flowProcess: FlowProcess[_], functionCall: FunctionCall[Any]) {
val partialfn = lockedFn.get
val args = conv(functionCall.getArguments)
if (partialfn.isDefinedAt(args)) {
functionCall.getOutputCollector.add(set(partialfn(args)))
}
}
}
/**
* An implementation of map-side combining which is appropriate for associative and commutative functions
* If a cacheSize is given, it is used; otherwise we query
* the config for cascading.aggregateby.threshold (the standard cascading param for an equivalent case),
* falling back to a default value of 100,000
*
* This keeps a cache of keys up to the cache-size, summing values as keys collide
* On eviction, or completion of this Operation, the key-value pairs are put into outputCollector.
*
* This NEVER spills to disk and is generally never a performance penalty. If you have
* poor locality in the keys, you just don't get any benefit but little added cost.
*
* Note this means that you may still have repeated keys in the output even on a single mapper
* since the key space may be so large that you can't fit all of them in the cache at the same
* time.
*
* You can use this with the Fields-API by doing:
* {{{
* val msr = new MapsideReduce(Semigroup.from(fn), 'key, 'value, None)
* // MUST map onto the same key,value space (may be multiple fields)
* val mapSideReduced = pipe.eachTo(('key, 'value) -> ('key, 'value)) { _ => msr }
* }}}
* That said, this is equivalent to AggregateBy; its only advantage is that it is much simpler than AggregateBy.
* AggregateBy assumes several parallel reductions are happening, and thus has many loops, and array lookups
* to deal with that. Since this does many fewer allocations, and has a smaller code-path it may be faster for
* the typed-API.
*/
class MapsideReduce[V](
@transient commutativeSemigroup: Semigroup[V],
keyFields: Fields, valueFields: Fields,
cacheSize: Option[Int])(implicit conv: TupleConverter[V], set: TupleSetter[V])
extends BaseOperation[SummingCache[Tuple, V]](Fields.join(keyFields, valueFields))
with Function[SummingCache[Tuple, V]]
with ScaldingPrepare[SummingCache[Tuple, V]] {
val boxedSemigroup = Externalizer(commutativeSemigroup)
val DEFAULT_CACHE_SIZE = 100000
val SIZE_CONFIG_KEY = AggregateBy.AGGREGATE_BY_THRESHOLD
def cacheSize(fp: FlowProcess[_]): Int =
cacheSize.orElse {
Option(fp.getStringProperty(SIZE_CONFIG_KEY))
.filterNot { _.isEmpty }
.map { _.toInt }
}
.getOrElse(DEFAULT_CACHE_SIZE)
override def prepare(flowProcess: FlowProcess[_], operationCall: OperationCall[SummingCache[Tuple, V]]) {
//Set up the context:
implicit val sg: Semigroup[V] = boxedSemigroup.get
val cache = SummingCache[Tuple, V](cacheSize(flowProcess))
operationCall.setContext(cache)
}
@inline
private def add(evicted: Option[Map[Tuple, V]], functionCall: FunctionCall[SummingCache[Tuple, V]]) {
// Use iterator and while for optimal performance (avoid closures/fn calls)
if (evicted.isDefined) {
val it = evicted.get.iterator
val tecol = functionCall.getOutputCollector
while (it.hasNext) {
val (key, value) = it.next
// Safe to mutate this key as it is evicted from the map
key.addAll(set(value))
tecol.add(key)
}
}
}
override def operate(flowProcess: FlowProcess[_], functionCall: FunctionCall[SummingCache[Tuple, V]]) {
val cache = functionCall.getContext
val keyValueTE = functionCall.getArguments
// Have to keep a copy of the key tuple because cascading will modify it
val key = keyValueTE.selectEntry(keyFields).getTupleCopy
val value = conv(keyValueTE.selectEntry(valueFields))
add(cache.put(Map(key -> value)), functionCall)
}
override def flush(flowProcess: FlowProcess[_], operationCall: OperationCall[SummingCache[Tuple, V]]) {
// Docs say it is safe to do this cast:
// http://docs.cascading.org/cascading/2.1/javadoc/cascading/operation/Operation.html#flush(cascading.flow.FlowProcess, cascading.operation.OperationCall)
val functionCall = operationCall.asInstanceOf[FunctionCall[SummingCache[Tuple, V]]]
val cache = functionCall.getContext
add(cache.flush, functionCall)
}
override def cleanup(flowProcess: FlowProcess[_], operationCall: OperationCall[SummingCache[Tuple, V]]) {
// The cache may be large, so make sure we drop any reference to it ASAP
// probably overly defensive, but it's super cheap.
operationCall.setContext(null)
}
}
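// Illustrative sketch (not part of the original file): MapsideReduce delegates the actual
// map-side combining to algebird's SummingCache, as used above. Keys are summed inside a
// bounded cache; evicted entries are what would be emitted downstream, and flush drains the
// rest at the end. The object and the tiny capacity of 2 are hypothetical, chosen only to
// force evictions early.
object SummingCacheSketch {
  def demo(): Unit = {
    implicit val sg: Semigroup[Int] = Semigroup.from(_ + _)
    val cache = SummingCache[String, Int](2)
    val evicted1 = cache.put(Map("a" -> 1)) // typically None: "a" still fits in the cache
    val evicted2 = cache.put(Map("a" -> 2)) // collides with "a": values are summed
    val evicted3 = cache.put(Map("b" -> 1, "c" -> 1)) // may evict an older key
    val remaining = cache.flush // whatever is still cached is returned at the end
    println(Seq(evicted1, evicted2, evicted3, remaining))
  }
}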
/*
* BaseOperation with support for context
*/
abstract class SideEffectBaseOperation[C](
@transient bf: => C, // begin function returns a context
@transient ef: C => Unit, // end function to clean up context object
fields: Fields) extends BaseOperation[C](fields) with ScaldingPrepare[C] {
val lockedBf = Externalizer(() => bf)
val lockedEf = Externalizer(ef)
override def prepare(flowProcess: FlowProcess[_], operationCall: OperationCall[C]) {
operationCall.setContext(lockedBf.get.apply)
}
override def cleanup(flowProcess: FlowProcess[_], operationCall: OperationCall[C]) {
lockedEf.get(operationCall.getContext)
}
}
/*
* A map function that allows a state object to be set up and torn down.
*/
class SideEffectMapFunction[S, C, T](
bf: => C, // begin function returns a context
@transient fn: (C, S) => T, // function that takes a context and a tuple and generate a new tuple
ef: C => Unit, // end function to clean up context object
fields: Fields,
conv: TupleConverter[S],
set: TupleSetter[T]) extends SideEffectBaseOperation[C](bf, ef, fields) with Function[C] {
val lockedFn = Externalizer(fn)
override def operate(flowProcess: FlowProcess[_], functionCall: FunctionCall[C]) {
val context = functionCall.getContext
val s = conv(functionCall.getArguments)
val res = lockedFn.get(context, s)
functionCall.getOutputCollector.add(set(res))
}
}
/*
* A flatmap function that allows a state object to be set up and torn down.
*/
class SideEffectFlatMapFunction[S, C, T](
bf: => C, // begin function returns a context
@transient fn: (C, S) => TraversableOnce[T], // function that takes a context and a tuple, returns TraversableOnce of T
ef: C => Unit, // end function to clean up context object
fields: Fields,
conv: TupleConverter[S],
set: TupleSetter[T]) extends SideEffectBaseOperation[C](bf, ef, fields) with Function[C] {
val lockedFn = Externalizer(fn)
override def operate(flowProcess: FlowProcess[_], functionCall: FunctionCall[C]) {
val context = functionCall.getContext
val s = conv(functionCall.getArguments)
lockedFn.get(context, s) foreach { t => functionCall.getOutputCollector.add(set(t)) }
}
}
class FilterFunction[T](@transient fn: T => Boolean, conv: TupleConverter[T])
extends BaseOperation[Any] with Filter[Any] with ScaldingPrepare[Any] {
val lockedFn = Externalizer(fn)
def isRemove(flowProcess: FlowProcess[_], filterCall: FilterCall[Any]) = {
!lockedFn.get(conv(filterCall.getArguments))
}
}
// All the following are operations for use in GroupBuilder
class FoldAggregator[T, X](@transient fn: (X, T) => X, @transient init: X, fields: Fields,
conv: TupleConverter[T], set: TupleSetter[X])
extends BaseOperation[X](fields) with Aggregator[X] with ScaldingPrepare[X] {
val lockedFn = Externalizer(fn)
private val lockedInit = MeatLocker(init)
def initCopy = lockedInit.copy
def start(flowProcess: FlowProcess[_], call: AggregatorCall[X]) {
call.setContext(initCopy)
}
def aggregate(flowProcess: FlowProcess[_], call: AggregatorCall[X]) {
val left = call.getContext
val right = conv(call.getArguments)
call.setContext(lockedFn.get(left, right))
}
def complete(flowProcess: FlowProcess[_], call: AggregatorCall[X]) {
emit(flowProcess, call)
}
def emit(flowProcess: FlowProcess[_], call: AggregatorCall[X]) {
call.getOutputCollector.add(set(call.getContext))
}
}
/*
* fields are the declared fields of this aggregator
*/
class MRMAggregator[T, X, U](
@transient inputFsmf: T => X,
@transient inputRfn: (X, X) => X,
@transient inputMrfn: X => U,
fields: Fields, conv: TupleConverter[T], set: TupleSetter[U])
extends BaseOperation[Tuple](fields) with Aggregator[Tuple] with ScaldingPrepare[Tuple] {
val fsmf = Externalizer(inputFsmf)
val rfn = Externalizer(inputRfn)
val mrfn = Externalizer(inputMrfn)
// The context is a singleton Tuple, which is mutable so
// we don't have to allocate at every step of the loop:
def start(flowProcess: FlowProcess[_], call: AggregatorCall[Tuple]) {
call.setContext(null)
}
def extractArgument(call: AggregatorCall[Tuple]): X = fsmf.get(conv(call.getArguments))
def aggregate(flowProcess: FlowProcess[_], call: AggregatorCall[Tuple]) {
val arg = extractArgument(call)
val ctx = call.getContext
if (null == ctx) {
// Initialize the context, this is the only allocation done by this loop.
val newCtx = Tuple.size(1)
newCtx.set(0, arg.asInstanceOf[AnyRef])
call.setContext(newCtx)
} else {
// Mutate the context:
val oldValue = ctx.getObject(0).asInstanceOf[X]
val newValue = rfn.get(oldValue, arg)
ctx.set(0, newValue.asInstanceOf[AnyRef])
}
}
def complete(flowProcess: FlowProcess[_], call: AggregatorCall[Tuple]) {
val ctx = call.getContext
if (null != ctx) {
val lastValue = ctx.getObject(0).asInstanceOf[X]
// Make sure to drop the reference to the lastValue as soon as possible (it may be big)
call.setContext(null)
call.getOutputCollector.add(set(mrfn.get(lastValue)))
} else {
throw new Exception("MRMAggregator completed without any args")
}
}
}
/**
* This handles the mapReduceMap work on the map-side of the operation. The code below
* attempts to be optimal with respect to memory allocations and performance, not functional
* style purity.
*/
abstract class FoldFunctor[X](fields: Fields) extends AggregateBy.Functor {
// Extend these three methods:
def first(args: TupleEntry): X
def subsequent(oldValue: X, newArgs: TupleEntry): X
def finish(lastValue: X): Tuple
override final def getDeclaredFields = fields
/*
* It's important to keep all state in the context as Cascading seems to
* reuse these objects, so any per instance state might give unexpected
* results.
*/
override final def aggregate(flowProcess: FlowProcess[_], args: TupleEntry, context: Tuple) = {
var nextContext: Tuple = null
val newContextObj = if (null == context) {
// First call, make a new mutable tuple to reduce allocations:
nextContext = Tuple.size(1)
first(args)
} else {
//We are updating
val oldValue = context.getObject(0).asInstanceOf[X]
nextContext = context
subsequent(oldValue, args)
}
nextContext.set(0, newContextObj.asInstanceOf[AnyRef])
//Return context for reuse next time:
nextContext
}
override final def complete(flowProcess: FlowProcess[_], context: Tuple) = {
if (null == context) {
throw new Exception("FoldFunctor completed with any aggregate calls")
} else {
val res = context.getObject(0).asInstanceOf[X]
// Make sure we remove the ref to the context ASAP:
context.set(0, null)
finish(res)
}
}
}
/**
* This handles the mapReduceMap work on the map-side of the operation. The code below
* attempts to be optimal with respect to memory allocations and performance, not functional
* style purity.
*/
class MRMFunctor[T, X](
@transient inputMrfn: T => X,
@transient inputRfn: (X, X) => X,
fields: Fields,
conv: TupleConverter[T], set: TupleSetter[X])
extends FoldFunctor[X](fields) {
val mrfn = Externalizer(inputMrfn)
val rfn = Externalizer(inputRfn)
override def first(args: TupleEntry): X = mrfn.get(conv(args))
override def subsequent(oldValue: X, newArgs: TupleEntry) = {
val right = mrfn.get(conv(newArgs))
rfn.get(oldValue, right)
}
override def finish(lastValue: X) = set(lastValue)
}
/**
* MapReduceMapBy Class
*/
class MRMBy[T, X, U](arguments: Fields,
middleFields: Fields,
declaredFields: Fields,
mfn: T => X,
rfn: (X, X) => X,
mfn2: X => U,
startConv: TupleConverter[T],
midSet: TupleSetter[X],
midConv: TupleConverter[X],
endSet: TupleSetter[U]) extends AggregateBy(
arguments,
new MRMFunctor[T, X](mfn, rfn, middleFields, startConv, midSet),
new MRMAggregator[X, X, U](args => args, rfn, mfn2, declaredFields, midConv, endSet))
class BufferOp[I, T, X](
@transient init: I,
@transient inputIterfn: (I, Iterator[T]) => TraversableOnce[X],
fields: Fields, conv: TupleConverter[T], set: TupleSetter[X])
extends BaseOperation[Any](fields) with Buffer[Any] with ScaldingPrepare[Any] {
val iterfn = Externalizer(inputIterfn)
private val lockedInit = MeatLocker(init)
def initCopy = lockedInit.copy
def operate(flowProcess: FlowProcess[_], call: BufferCall[Any]) {
val oc = call.getOutputCollector
val in = call.getArgumentsIterator.asScala.map { entry => conv(entry) }
iterfn.get(initCopy, in).foreach { x => oc.add(set(x)) }
}
}
/*
* A buffer that allows a state object to be set up and torn down.
*/
class SideEffectBufferOp[I, T, C, X](
@transient init: I,
bf: => C, // begin function returns a context
@transient inputIterfn: (I, C, Iterator[T]) => TraversableOnce[X],
ef: C => Unit, // end function to clean up context object
fields: Fields,
conv: TupleConverter[T],
set: TupleSetter[X]) extends SideEffectBaseOperation[C](bf, ef, fields) with Buffer[C] {
val iterfn = Externalizer(inputIterfn)
private val lockedInit = MeatLocker(init)
def initCopy = lockedInit.copy
def operate(flowProcess: FlowProcess[_], call: BufferCall[C]) {
val context = call.getContext
val oc = call.getOutputCollector
val in = call.getArgumentsIterator.asScala.map { entry => conv(entry) }
iterfn.get(initCopy, context, in).foreach { x => oc.add(set(x)) }
}
}
class SampleWithReplacement(frac: Double, val seed: Int = new scala.util.Random().nextInt) extends BaseOperation[Poisson]()
with Function[Poisson] with ScaldingPrepare[Poisson] {
override def prepare(flowProcess: FlowProcess[_], operationCall: OperationCall[Poisson]) {
super.prepare(flowProcess, operationCall)
val p = new Poisson(frac, seed)
operationCall.setContext(p);
}
def operate(flowProcess: FlowProcess[_], functionCall: FunctionCall[Poisson]) {
val r = functionCall.getContext.nextInt
for (i <- 0 until r)
functionCall.getOutputCollector().add(Tuple.NULL)
}
}
/** In the typed API every reduce operation is handled by this Buffer */
class TypedBufferOp[K, V, U](
@transient reduceFn: (K, Iterator[V]) => Iterator[U],
valueField: Fields)
extends BaseOperation[Any](valueField) with Buffer[Any] with ScaldingPrepare[Any] {
val reduceFnSer = Externalizer(reduceFn)
def operate(flowProcess: FlowProcess[_], call: BufferCall[Any]) {
val oc = call.getOutputCollector
val key = call.getGroup.getObject(0).asInstanceOf[K]
val values = call.getArgumentsIterator
.asScala
.map(_.getObject(0).asInstanceOf[V])
// Avoiding a lambda here
val resIter = reduceFnSer.get(key, values)
while (resIter.hasNext) {
val tup = Tuple.size(1)
tup.set(0, resIter.next)
oc.add(tup)
}
}
}
}
|
zirpins/scalding
|
scalding-core/src/main/scala/com/twitter/scalding/Operations.scala
|
Scala
|
apache-2.0
| 19,140 |
package cz.kamenitxan.jakon.utils.security.oauth
import cz.kamenitxan.jakon.core.dynamic.{Get, Pagelet}
import cz.kamenitxan.jakon.logging.Logger
import cz.kamenitxan.jakon.utils.PageContext
import cz.kamenitxan.jakon.utils.Utils._
import cz.kamenitxan.jakon.webui.controller.pagelets.AbstractAdminPagelet
import cz.kamenitxan.jakon.webui.entity.MessageSeverity
import spark.{Request, Response}
import java.sql.Connection
@Pagelet(path = "/admin/login/oauth")
class OauthLoginRouter extends AbstractAdminPagelet {
override val name: String = classOf[OauthLoginRouter].getName
@Get(path = "", template = "")
def get(req: Request, res: Response, conn: Connection) = {
val provider = req.queryParams("provider")
val success = provider match {
case p if p == Google.getClass.getSimpleName =>
Google.handleAuthResponse(req)(conn)
case p =>
Logger.error("Unknown oauth provider: " + p)
false
}
if (success) {
val redirectTo = req.queryParams(OauthProvider.REDIRECT_TO)
res.redirect(redirectTo.getOrElse("/admin/index"))
} else {
PageContext.getInstance().addMessage(MessageSeverity.ERROR, "OAUTH_LOGIN_FAILED")
res.redirect("/admin")
}
}
}
|
kamenitxan/Jakon
|
modules/backend/src/main/scala/cz/kamenitxan/jakon/utils/security/oauth/OauthLoginRouter.scala
|
Scala
|
bsd-3-clause
| 1,184 |
package wakfutcp.protocol.messages.client
import wakfutcp.protocol.{ClientMessage, Codec}
final case class DeleteItemRequestMessage(
itemId: Long
) extends ClientMessage {
override val id = 5261
override val arch = 3
}
object DeleteItemRequestMessage {
import Codec._
import cats.syntax.invariant._
implicit val codec: Codec[DeleteItemRequestMessage] =
long.imap(apply)(Function.unlift(unapply))
}
|
OpenWakfu/wakfutcp
|
protocol/src/main/scala/wakfutcp/protocol/messages/client/DeleteItemRequestMessage.scala
|
Scala
|
mit
| 418 |
package com.github.distributed_stumps.stumps.message.provider
import akka.actor.ActorRef
import com.github.distributed_stumps.stumps.message.common._
case class Register(location: Location, resource: Resource, heartBeatListener: ActorRef)
|
distributed-stumps/stumps-messages
|
src/main/scala/com/github/distributed_stumps/stumps/message/provider/Register.scala
|
Scala
|
apache-2.0
| 241 |
// Code generated by sbt-mavgen. Manual edits will be overwritten
package scavlink.message.ASLUAV
import scavlink.message._
import scavlink.message.enums._
/**
* Voltage and current sensor data
* @param adc121VspbVolt Power board voltage sensor reading in volts
* @param adc121CspbAmp Power board current sensor reading in amps
* @param adc121Cs1Amp Board current sensor 1 reading in amps
* @param adc121Cs2Amp Board current sensor 2 reading in amps
*/
case class SensPower(adc121VspbVolt: Float = 0, adc121CspbAmp: Float = 0, adc121Cs1Amp: Float = 0, adc121Cs2Amp: Float = 0)
extends Message {
val _id = 201
val _name = "SENS_POWER"
val _bundle = Bundle.ASLUAV
override def toString = "SENS_POWER" + " adc121VspbVolt=" + adc121VspbVolt + " adc121CspbAmp=" + adc121CspbAmp + " adc121Cs1Amp=" + adc121Cs1Amp + " adc121Cs2Amp=" + adc121Cs2Amp
}
/**
* Maximum Power Point Tracker (MPPT) sensor data for solar module power performance tracking
* @param mpptTimestamp MPPT last timestamp
* @param mppt1Volt MPPT1 voltage
* @param mppt1Amp MPPT1 current
* @param mppt1Pwm MPPT1 pwm
* @param mppt1Status MPPT1 status
* @param mppt2Volt MPPT2 voltage
* @param mppt2Amp MPPT2 current
* @param mppt2Pwm MPPT2 pwm
* @param mppt2Status MPPT2 status
* @param mppt3Volt MPPT3 voltage
* @param mppt3Amp MPPT3 current
* @param mppt3Pwm MPPT3 pwm
* @param mppt3Status MPPT3 status
*/
case class SensMppt(mpptTimestamp: Long = 0, mppt1Volt: Float = 0, mppt1Amp: Float = 0, mppt1Pwm: Short = 0, mppt1Status: Byte = 0, mppt2Volt: Float = 0, mppt2Amp: Float = 0, mppt2Pwm: Short = 0, mppt2Status: Byte = 0, mppt3Volt: Float = 0, mppt3Amp: Float = 0, mppt3Pwm: Short = 0, mppt3Status: Byte = 0)
extends Message {
val _id = 202
val _name = "SENS_MPPT"
val _bundle = Bundle.ASLUAV
override def toString = "SENS_MPPT" + " mpptTimestamp=" + mpptTimestamp + " mppt1Volt=" + mppt1Volt + " mppt1Amp=" + mppt1Amp + " mppt1Pwm=" + mppt1Pwm + " mppt1Status=" + mppt1Status + " mppt2Volt=" + mppt2Volt + " mppt2Amp=" + mppt2Amp + " mppt2Pwm=" + mppt2Pwm + " mppt2Status=" + mppt2Status + " mppt3Volt=" + mppt3Volt + " mppt3Amp=" + mppt3Amp + " mppt3Pwm=" + mppt3Pwm + " mppt3Status=" + mppt3Status
}
/**
* ASL-fixed-wing controller data
* @param timestamp Timestamp
* @param aslctrlMode ASLCTRL control-mode (manual, stabilized, auto, etc...)
* @param h See the source code for a description of these values...
* @param hRef
* @param hRefT
* @param pitchangle Pitch angle [deg]
* @param pitchangleref Pitch angle reference[deg]
* @param q
* @param qRef
* @param uElev
* @param uThrot
* @param uThrot2
* @param aZ
* @param airspeedref Airspeed reference [m/s]
* @param spoilersengaged
* @param yawangle Yaw angle [deg]
* @param yawangleref Yaw angle reference[deg]
* @param rollangle Roll angle [deg]
* @param rollangleref Roll angle reference[deg]
* @param p
* @param pRef
* @param r
* @param rRef
* @param uAil
* @param uRud
*/
case class AslctrlData(timestamp: Long = 0, aslctrlMode: Byte = 0, h: Float = 0, hRef: Float = 0, hRefT: Float = 0, pitchangle: Float = 0, pitchangleref: Float = 0, q: Float = 0, qRef: Float = 0, uElev: Float = 0, uThrot: Float = 0, uThrot2: Float = 0, aZ: Float = 0, airspeedref: Float = 0, spoilersengaged: Byte = 0, yawangle: Float = 0, yawangleref: Float = 0, rollangle: Float = 0, rollangleref: Float = 0, p: Float = 0, pRef: Float = 0, r: Float = 0, rRef: Float = 0, uAil: Float = 0, uRud: Float = 0)
extends Message {
val _id = 203
val _name = "ASLCTRL_DATA"
val _bundle = Bundle.ASLUAV
override def toString = "ASLCTRL_DATA" + " timestamp=" + timestamp + " aslctrlMode=" + aslctrlMode + " h=" + h + " hRef=" + hRef + " hRefT=" + hRefT + " pitchangle=" + pitchangle + " pitchangleref=" + pitchangleref + " q=" + q + " qRef=" + qRef + " uElev=" + uElev + " uThrot=" + uThrot + " uThrot2=" + uThrot2 + " aZ=" + aZ + " airspeedref=" + airspeedref + " spoilersengaged=" + spoilersengaged + " yawangle=" + yawangle + " yawangleref=" + yawangleref + " rollangle=" + rollangle + " rollangleref=" + rollangleref + " p=" + p + " pRef=" + pRef + " r=" + r + " rRef=" + rRef + " uAil=" + uAil + " uRud=" + uRud
}
/**
* ASL-fixed-wing controller debug data
* @param i321 Debug data
* @param i81 Debug data
* @param i82 Debug data
* @param f1 Debug data
* @param f2 Debug data
* @param f3 Debug data
* @param f4 Debug data
* @param f5 Debug data
* @param f6 Debug data
* @param f7 Debug data
* @param f8 Debug data
*/
case class AslctrlDebug(i321: Int = 0, i81: Byte = 0, i82: Byte = 0, f1: Float = 0, f2: Float = 0, f3: Float = 0, f4: Float = 0, f5: Float = 0, f6: Float = 0, f7: Float = 0, f8: Float = 0)
extends Message {
val _id = 204
val _name = "ASLCTRL_DEBUG"
val _bundle = Bundle.ASLUAV
override def toString = "ASLCTRL_DEBUG" + " i321=" + i321 + " i81=" + i81 + " i82=" + i82 + " f1=" + f1 + " f2=" + f2 + " f3=" + f3 + " f4=" + f4 + " f5=" + f5 + " f6=" + f6 + " f7=" + f7 + " f8=" + f8
}
/**
* Extended state information for ASLUAVs
* @param ledStatus Status of the position-indicator LEDs
* @param satcomStatus Status of the IRIDIUM satellite communication system
* @param servoStatus Status vector for up to 8 servos
* @param motorRpm Motor RPM
*/
case class AsluavStatus(ledStatus: Byte = 0, satcomStatus: Byte = 0, servoStatus: Vector[Byte] = Vector.fill(8)(0), motorRpm: Float = 0)
extends Message {
require(servoStatus.length <= 8)
val _id = 205
val _name = "ASLUAV_STATUS"
val _bundle = Bundle.ASLUAV
override def toString = "ASLUAV_STATUS" + " ledStatus=" + ledStatus + " satcomStatus=" + satcomStatus + " servoStatus=" + servoStatus + " motorRpm=" + motorRpm
}
|
nickolasrossi/scavlink
|
src/main/scala/scavlink/message/ASLUAV/Messages.scala
|
Scala
|
mit
| 5,696 |
/*§
===========================================================================
Chronos
===========================================================================
Copyright (C) 2015-2016 Gianluca Costa
===========================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===========================================================================
*/
package info.gianlucacosta.chronos.interpreter.io
import info.gianlucacosta.chronos.interpreter.Input
import info.gianlucacosta.chronos.interpreter.atoms.{BooleanAtom, DoubleAtom, IntAtom}
/**
* Simplifies input by implementing most functions in terms of readString(),
* which remains abstract.
*/
abstract class BasicInput extends Input {
override def readDouble(prompt: String): DoubleAtom = {
var result: Option[DoubleAtom] = None
while (result.isEmpty) {
val inputString = readString(prompt)
try {
result = Some(inputString.toDoubleAtom)
} catch {
case _: Exception =>
}
}
result.get
}
override def readInt(prompt: String): IntAtom = {
var result: Option[IntAtom] = None
while (result.isEmpty) {
val inputString = readString(prompt)
try {
result = Some(inputString.toIntAtom)
} catch {
case _: Exception =>
}
}
result.get
}
override def readBoolean(prompt: String): BooleanAtom = {
var result: Option[BooleanAtom] = None
while (result.isEmpty) {
val inputString = readString(prompt)
try {
result = Some(inputString.toBooleanAtom)
} catch {
case _: Exception =>
}
}
result.get
}
}
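// Hypothetical helper sketch (not part of Chronos): the three readers above share the same
// retry-until-parsed loop. For an arbitrary reader and parser it could be factored out as
// below; the object name and the readLine/parse parameters are illustrative assumptions.
object RetryReadSketch {
  @annotation.tailrec
  def readUntil[A](prompt: String, readLine: String => String, parse: String => A): A = {
    // Attempt one read/parse cycle; any parse failure simply triggers another prompt.
    val attempt =
      try Some(parse(readLine(prompt)))
      catch { case _: Exception => None }
    attempt match {
      case Some(value) => value
      case None => readUntil(prompt, readLine, parse)
    }
  }
}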
|
giancosta86/Chronos
|
src/main/scala/info/gianlucacosta/chronos/interpreter/io/BasicInput.scala
|
Scala
|
apache-2.0
| 2,194 |
package com.twitter.inject.internal
import scala.collection.generic.CanBuildFrom
import scala.collection.mutable.{HashSet => MutableHashSet}
import scala.language.higherKinds
private[inject] object iterable {
implicit class RichIterable[Elem, From[Elem] <: Iterable[Elem]](val self: From[Elem])
extends AnyVal {
/**
* Distinct 'iterable' elements using the passed-in 'hash' function
* @param hash Hash function to determine unique elements
* @return Distinct elements
*/
def distinctBy[HashCodeType](
hash: Elem => HashCodeType
)(
implicit cbf: CanBuildFrom[From[Elem], Elem, From[Elem]]
): From[Elem] = {
val builder = cbf()
val seen = MutableHashSet[HashCodeType]()
for (elem <- self) {
if (!seen(hash(elem))) {
seen += hash(elem)
builder += elem
}
}
builder.result()
}
}
}
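// Hypothetical usage sketch (not part of the original file): distinctBy keeps the first
// element seen for each hash value, so hashing by parity below keeps one odd and one even
// number, in encounter order.
private[inject] object IterableUsageSketch {
  import iterable._
  val firstPerParity: List[Int] = List(1, 3, 2, 4).distinctBy(_ % 2) // List(1, 2)
}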
|
twitter/util
|
util-inject/src/main/scala/com/twitter/util/inject/internal/iterable.scala
|
Scala
|
apache-2.0
| 912 |
package com.github.j5ik2o.spetstore
import akka.actor.{ ActorPath, ActorSystem }
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.stream.{ ActorMaterializer, Materializer }
import akka.util.Timeout
import com.github.j5ik2o.spetstore.adaptor.aggregate.{ CustomerMessageBroker, ItemTypeAggregate, ItemTypeMessageBroker }
import com.github.j5ik2o.spetstore.adaptor.eventbus.EventBus
import com.github.j5ik2o.spetstore.usecase.{ CustomerUseCase, ItemTypeUseCase }
import com.typesafe.config.{ Config, ConfigFactory }
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
object Main extends App with Route with SharedJournalSupport {
val httpInterface = "0.0.0.0"
val httpPort = 8080
val configuration = ConfigFactory.parseString("akka.remote.netty.tcp.port = " + 2551)
.withFallback(ConfigFactory.load())
val clusterPort = configuration.getInt("akka.remote.netty.tcp.port")
private implicit val actorSystem = ActorSystem("ClusterSystem", configuration)
override implicit val executor: ExecutionContext = actorSystem.dispatcher
override implicit val materializer: Materializer = ActorMaterializer()
startupSharedJournal(
startStore = clusterPort == 2551,
path = ActorPath.fromString("akka.tcp://[email protected]:2551/user/store")
)
val eventBus = EventBus.ofRemote(actorSystem)
val customerAggregate = CustomerMessageBroker(eventBus)
val itemTypeAggregate = ItemTypeMessageBroker(eventBus)
override val itemTypeUseCase: ItemTypeUseCase = ItemTypeUseCase(itemTypeAggregate)
override val customerUseCase: CustomerUseCase = CustomerUseCase(customerAggregate)
Http().bindAndHandle(
handler = logRequestResult("log")(routes),
interface = httpInterface,
port = httpPort
)
}
|
j5ik2o/spetstore-cqrs-es-akka
|
akka-http-application/src/main/scala/com/github/j5ik2o/spetstore/Main.scala
|
Scala
|
mit
| 1,815 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.it.action
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import play.api.mvc._
import play.api.test.{ FakeRequest, PlaySpecification }
import scala.concurrent.Future
class ContentNegotiationSpec extends PlaySpecification with ControllerHelpers {
implicit val system = ActorSystem()
implicit val mat = ActorMaterializer()
val Action = ActionBuilder.ignoringBody
"rendering" should {
"work with simple results" in {
status(Action { implicit req =>
render {
case Accepts.Json() => Ok
}
}(FakeRequest().withHeaders(ACCEPT -> "application/json"))) must_== 200
}
"work with simple results in an async action" in {
status(Action.async { implicit req =>
Future.successful(render {
case Accepts.Json() => Ok
})
}(FakeRequest().withHeaders(ACCEPT -> "application/json"))) must_== 200
}
"work with async results" in {
status(Action.async { implicit req =>
render.async {
case Accepts.Json() => Future.successful(Ok)
}
}(FakeRequest().withHeaders(ACCEPT -> "application/json"))) must_== 200
}
}
}
|
Shruti9520/playframework
|
framework/src/play-integration-test/src/test/scala/play/it/action/ContentNegotiationSpec.scala
|
Scala
|
apache-2.0
| 1,250 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.aggfunctions
import java.math.BigDecimal
import java.sql.{Date, Time, Timestamp}
import org.apache.flink.table.functions.AggregateFunction
import org.apache.flink.table.functions.aggfunctions._
/**
* Test case for built-in max with retraction aggregate function
*
* @tparam T the type for the aggregation result
*/
abstract class MaxWithRetractAggFunctionTest[T: Numeric]
extends AggFunctionTestBase[T, MaxWithRetractAccumulator[T]] {
private val numeric: Numeric[T] = implicitly[Numeric[T]]
def minVal: T
def maxVal: T
override def inputValueSets: Seq[Seq[T]] = Seq(
Seq(
numeric.fromInt(1),
null.asInstanceOf[T],
maxVal,
numeric.fromInt(-99),
numeric.fromInt(3),
numeric.fromInt(56),
numeric.fromInt(0),
minVal,
numeric.fromInt(-20),
numeric.fromInt(17),
null.asInstanceOf[T]
),
Seq(
null.asInstanceOf[T],
null.asInstanceOf[T],
null.asInstanceOf[T],
null.asInstanceOf[T],
null.asInstanceOf[T],
null.asInstanceOf[T]
)
)
override def expectedResults: Seq[T] = Seq(
maxVal,
null.asInstanceOf[T]
)
override def retractFunc = aggregator.getClass.getMethod("retract", accType, classOf[Any])
}
class ByteMaxWithRetractAggFunctionTest extends MaxWithRetractAggFunctionTest[Byte] {
override def minVal = (Byte.MinValue + 1).toByte
override def maxVal = (Byte.MaxValue - 1).toByte
override def aggregator: AggregateFunction[Byte, MaxWithRetractAccumulator[Byte]] =
new ByteMaxWithRetractAggFunction()
}
class ShortMaxWithRetractAggFunctionTest extends MaxWithRetractAggFunctionTest[Short] {
override def minVal = (Short.MinValue + 1).toShort
override def maxVal = (Short.MaxValue - 1).toShort
override def aggregator: AggregateFunction[Short, MaxWithRetractAccumulator[Short]] =
new ShortMaxWithRetractAggFunction()
}
class IntMaxWithRetractAggFunctionTest extends MaxWithRetractAggFunctionTest[Int] {
override def minVal = Int.MinValue + 1
override def maxVal = Int.MaxValue - 1
override def aggregator: AggregateFunction[Int, MaxWithRetractAccumulator[Int]] =
new IntMaxWithRetractAggFunction()
}
class LongMaxWithRetractAggFunctionTest extends MaxWithRetractAggFunctionTest[Long] {
override def minVal = Long.MinValue + 1
override def maxVal = Long.MaxValue - 1
override def aggregator: AggregateFunction[Long, MaxWithRetractAccumulator[Long]] =
new LongMaxWithRetractAggFunction()
}
class FloatMaxWithRetractAggFunctionTest extends MaxWithRetractAggFunctionTest[Float] {
override def minVal = Float.MinValue / 2
override def maxVal = Float.MaxValue / 2
override def aggregator: AggregateFunction[Float, MaxWithRetractAccumulator[Float]] =
new FloatMaxWithRetractAggFunction()
}
class DoubleMaxWithRetractAggFunctionTest extends MaxWithRetractAggFunctionTest[Double] {
override def minVal = Double.MinValue / 2
override def maxVal = Double.MaxValue / 2
override def aggregator: AggregateFunction[Double, MaxWithRetractAccumulator[Double]] =
new DoubleMaxWithRetractAggFunction()
}
class BooleanMaxWithRetractAggFunctionTest
extends AggFunctionTestBase[Boolean, MaxWithRetractAccumulator[Boolean]] {
override def inputValueSets: Seq[Seq[Boolean]] = Seq(
Seq(
false,
false,
false
),
Seq(
true,
true,
true
),
Seq(
true,
false,
null.asInstanceOf[Boolean],
true,
false,
true,
null.asInstanceOf[Boolean]
),
Seq(
null.asInstanceOf[Boolean],
null.asInstanceOf[Boolean],
null.asInstanceOf[Boolean]
)
)
override def expectedResults: Seq[Boolean] = Seq(
false,
true,
true,
null.asInstanceOf[Boolean]
)
override def aggregator: AggregateFunction[Boolean, MaxWithRetractAccumulator[Boolean]] =
new BooleanMaxWithRetractAggFunction()
override def retractFunc = aggregator.getClass.getMethod("retract", accType, classOf[Any])
}
class DecimalMaxWithRetractAggFunctionTest
extends AggFunctionTestBase[BigDecimal, MaxWithRetractAccumulator[BigDecimal]] {
override def inputValueSets: Seq[Seq[_]] = Seq(
Seq(
new BigDecimal("1"),
new BigDecimal("1000.000001"),
new BigDecimal("-1"),
new BigDecimal("-999.998999"),
null,
new BigDecimal("0"),
new BigDecimal("-999.999"),
null,
new BigDecimal("999.999")
),
Seq(
null,
null,
null,
null,
null
)
)
override def expectedResults: Seq[BigDecimal] = Seq(
new BigDecimal("1000.000001"),
null
)
override def aggregator: AggregateFunction[BigDecimal, MaxWithRetractAccumulator[BigDecimal]] =
new DecimalMaxWithRetractAggFunction()
override def retractFunc = aggregator.getClass.getMethod("retract", accType, classOf[Any])
}
class StringMaxWithRetractAggFunctionTest
extends AggFunctionTestBase[String, MaxWithRetractAccumulator[String]] {
override def inputValueSets: Seq[Seq[_]] = Seq(
Seq(
"abc",
"def",
"ghi",
null,
"jkl",
null,
"zzz"
),
Seq(
null,
null
),
Seq(
"x",
null,
"e"
)
)
override def expectedResults: Seq[String] = Seq(
"zzz",
null,
"x"
)
override def aggregator: AggregateFunction[String, MaxWithRetractAccumulator[String]] =
new StringMaxWithRetractAggFunction()
override def retractFunc = aggregator.getClass.getMethod("retract", accType, classOf[Any])
}
class TimestampMaxWithRetractAggFunctionTest
extends AggFunctionTestBase[Timestamp, MaxWithRetractAccumulator[Timestamp]] {
override def inputValueSets: Seq[Seq[_]] = Seq(
Seq(
new Timestamp(0),
new Timestamp(1000),
new Timestamp(100),
null.asInstanceOf[Timestamp],
new Timestamp(10)
),
Seq(
null.asInstanceOf[Timestamp],
null.asInstanceOf[Timestamp],
null.asInstanceOf[Timestamp]
)
)
override def expectedResults: Seq[Timestamp] = Seq(
new Timestamp(1000),
null.asInstanceOf[Timestamp]
)
override def aggregator: AggregateFunction[Timestamp, MaxWithRetractAccumulator[Timestamp]] =
new TimestampMaxWithRetractAggFunction()
override def retractFunc = aggregator.getClass.getMethod("retract", accType, classOf[Any])
}
class DateMaxWithRetractAggFunctionTest
extends AggFunctionTestBase[Date, MaxWithRetractAccumulator[Date]] {
override def inputValueSets: Seq[Seq[_]] = Seq(
Seq(
new Date(0),
new Date(1000),
new Date(100),
null.asInstanceOf[Date],
new Date(10)
),
Seq(
null.asInstanceOf[Date],
null.asInstanceOf[Date],
null.asInstanceOf[Date]
)
)
override def expectedResults: Seq[Date] = Seq(
new Date(1000),
null.asInstanceOf[Date]
)
override def aggregator: AggregateFunction[Date, MaxWithRetractAccumulator[Date]] =
new DateMaxWithRetractAggFunction()
override def retractFunc = aggregator.getClass.getMethod("retract", accType, classOf[Any])
}
class TimeMaxWithRetractAggFunctionTest
extends AggFunctionTestBase[Time, MaxWithRetractAccumulator[Time]] {
override def inputValueSets: Seq[Seq[_]] = Seq(
Seq(
new Time(0),
new Time(1000),
new Time(100),
null.asInstanceOf[Time],
new Time(10)
),
Seq(
null.asInstanceOf[Time],
null.asInstanceOf[Time],
null.asInstanceOf[Time]
)
)
override def expectedResults: Seq[Time] = Seq(
new Time(1000),
null.asInstanceOf[Time]
)
override def aggregator: AggregateFunction[Time, MaxWithRetractAccumulator[Time]] =
new TimeMaxWithRetractAggFunction()
override def retractFunc = aggregator.getClass.getMethod("retract", accType, classOf[Any])
}
|
jinglining/flink
|
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/runtime/aggfunctions/MaxWithRetractAggFunctionTest.scala
|
Scala
|
apache-2.0
| 8,670 |
package com.twitter.finagle.exp.zookeeper.session
import com.twitter.finagle.exp.zookeeper.client.ZkClient
import com.twitter.finagle.exp.zookeeper.session.Session.{PingSender, States}
import com.twitter.finagle.exp.zookeeper.{ConnectRequest, ConnectResponse, ReplyHeader, WatchEvent}
import com.twitter.util.{Duration, Try}
import java.util.concurrent.atomic.AtomicBoolean
/**
* Session manager is used to manage sessions during the client's lifetime
*/
class SessionManager(canBeRo: Boolean) {
@volatile private[finagle] var session: Session = new Session()
/**
* Build a connect request to create a new session
*
* @return a customized ConnectRequest
*/
def buildConnectRequest(sessionTimeout: Duration): ConnectRequest = {
ConnectRequest(
0,
0L,
sessionTimeout,
0L,
Array[Byte](16),
canBeRo
)
}
/**
* Build a reconnect request depending on whether RO mode is allowed by the user,
* and whether the current session has a fake session ID (never connected
* to an RW server)
*
* @param sessionTimeout an optional timeout for the session
* @return a customized ConnectRequest
*/
def buildReconnectRequest(
sessionTimeout: Option[Duration] = None
): ConnectRequest = {
val sessionId = {
if (session.hasFakeSessionId.get) 0
else session.id
}
val sessTimeout = sessionTimeout getOrElse session.diseredTimeout
ConnectRequest(
0,
session.lastZxid.get(),
sessTimeout,
sessionId,
session.password,
canBeRo
)
}
def canCloseSession: Boolean = session.canClose
def canCreateSession: Boolean = session.canConnect
def canReconnect: Boolean = session.canReconnect
/**
* Close the current session and clean the session manager
*/
def closeAndClean() {
session.close()
}
/**
* Used to create a fresh Session from the connect response.
* Use cases: connection, reconnection with a new Session.
*
* @param conRep connect Response
* @param sessionTimeout connect request session timeout
* @param pinger function to send ping request
* @return Unit
*/
def newSession(
conRep: ConnectResponse,
sessionTimeout: Duration,
pinger: PingSender) {
ZkClient.logger.info(
"Connected to session with ID: %d".format(conRep.sessionId))
session.stop()
session = new Session(
conRep.sessionId,
conRep.passwd,
sessionTimeout,
conRep.timeOut,
new AtomicBoolean(conRep.isRO),
Some(pinger))
session.init()
}
/**
* Here we are parsing the header's error field
* and changing the connection state if required;
* the ZXID is then updated.
*
* @param header request's header
*/
def parseHeader(header: ReplyHeader) {
header.err match {
case 0 => // Ok error code
case -4 =>
session.currentState.set(States.CONNECTION_LOSS)
session.stop()
ZkClient.logger.warning("Received CONNECTION_LOSS event from server")
case -112 =>
session.currentState.set(States.SESSION_EXPIRED)
session.stop()
ZkClient.logger.warning("Session %d has expired".format(session.id))
case -115 =>
session.currentState.set(States.AUTH_FAILED)
session.stop()
ZkClient.logger.warning("Authentication to server has failed. " +
"Connection closed by server.")
case -118 => session.currentState.set(States.SESSION_MOVED)
session.stop()
ZkClient.logger.warning("Session has moved to another server")
case _ =>
}
if (header.zxid > 0) session.lastZxid.set(header.zxid)
}
/**
* Here we are parsing the watchEvent's state field
* and changing the connection state if required
*
* @param event a request header
*/
def parseWatchEvent(event: WatchEvent) {
event.state match {
case -112 =>
session.stop()
session.currentState.set(States.SESSION_EXPIRED)
ZkClient.logger.warning("Session %d has expired".format(session.id))
case 0 =>
session.stop()
session.currentState.set(States.NOT_CONNECTED)
ZkClient.logger.warning("Received NOT_CONNECTED event from server")
case 3 =>
session.isRO.compareAndSet(true, false)
session.hasFakeSessionId.compareAndSet(true, false)
if (session.currentState.get != States.CONNECTED) {
session.currentState.set(States.CONNECTED)
ZkClient.logger.info("Server is now in Read-Write mode")
}
case 4 =>
session.currentState.set(States.AUTH_FAILED)
session.stop()
ZkClient.logger.warning("Authentication to server has failed. " +
"Connection closed by server.")
case 5 =>
session.isRO.compareAndSet(false, true)
if (session.currentState.get != States.CONNECTED_READONLY) {
session.currentState.set(States.CONNECTED_READONLY)
ZkClient.logger.info("Server is now in Read Only mode")
}
case 6 =>
session.currentState.set(States.SASL_AUTHENTICATED)
ZkClient.logger.info("SASL authentication confirmed by server")
case _ =>
}
}
/**
* Used to reconnect with the same session id.
* Use cases: session reconnection after connection loss,
* reconnection to an RW-mode server.
*
* @param conReq connect response
* @param pinger function to send ping request
* @return Try[Unit]
*/
def reinit(
conReq: ConnectResponse,
pinger: PingSender
): Try[Unit] = { session.reinit(conReq, pinger) }
}
|
yonglehou/finagle-zookeeper
|
core/src/main/scala/com/twitter/finagle/exp/zookeeper/session/SessionManager.scala
|
Scala
|
apache-2.0
| 5,557 |
/**
* Swaggy Jenkins
* Jenkins API clients generated from Swagger / Open API specification
*
* The version of the OpenAPI document: 1.1.2-pre.0
* Contact: [email protected]
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
*/
package org.openapitools.server.model
case class PipelineRunNode(
`class`: Option[String],
displayName: Option[String],
durationInMillis: Option[Int],
edges: Option[List[PipelineRunNodeedges]],
id: Option[String],
result: Option[String],
startTime: Option[String],
state: Option[String]
)
|
cliffano/swaggy-jenkins
|
clients/scalatra/generated/src/main/scala/org/openapitools/server/model/PipelineRunNode.scala
|
Scala
|
mit
| 628 |
package core.authentication
import akka.util.ByteString
import redis.ByteStringFormatter
object UserSerializer {
implicit val byteStringFormatter = new ByteStringFormatter[Identity.User] {
def serialize(data: Identity.User): ByteString = {
ByteString(
data.id + "|" + data.role
)
}
def deserialize(bs: ByteString): Identity.User = {
val r = bs.utf8String.split('|').toList
Identity.User(r(0).toLong, r(1).toInt)
}
}
}
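// Hypothetical round-trip sketch (not part of the original file; assumes Identity.User(id: Long, role: Int),
// as implied by the deserializer above): serializing a user and deserializing the result should
// restore the same id and role.
object UserSerializerSketch {
  import UserSerializer.byteStringFormatter
  def roundTrip(user: Identity.User): Identity.User =
    byteStringFormatter.deserialize(byteStringFormatter.serialize(user))
  // e.g. roundTrip(Identity.User(42L, 1)) yields a user with id 42 and role 1
}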
|
piobab/akka-http-rest-api
|
src/main/scala/core/authentication/UserSerializer.scala
|
Scala
|
mit
| 473 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.orc
import java.io.File
import java.nio.charset.StandardCharsets
import java.sql.Timestamp
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce.{JobID, TaskAttemptID, TaskID, TaskType}
import org.apache.hadoop.mapreduce.lib.input.FileSplit
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.orc.{OrcConf, OrcFile}
import org.apache.orc.OrcConf.COMPRESS
import org.apache.orc.mapred.OrcStruct
import org.apache.orc.mapreduce.OrcInputFormat
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation, RecordReaderIterator}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
case class AllDataTypesWithNonPrimitiveType(
stringField: String,
intField: Int,
longField: Long,
floatField: Float,
doubleField: Double,
shortField: Short,
byteField: Byte,
booleanField: Boolean,
array: Seq[Int],
arrayContainsNull: Seq[Option[Int]],
map: Map[Int, Long],
mapValueContainsNull: Map[Int, Option[Long]],
data: (Seq[Int], (Int, String)))
case class BinaryData(binaryData: Array[Byte])
case class Contact(name: String, phone: String)
case class Person(name: String, age: Int, contacts: Seq[Contact])
abstract class OrcQueryTest extends OrcTest {
import testImplicits._
test("Read/write All Types") {
val data = (0 to 255).map { i =>
(s"$i", i, i.toLong, i.toFloat, i.toDouble, i.toShort, i.toByte, i % 2 == 0)
}
withOrcFile(data) { file =>
checkAnswer(
spark.read.orc(file),
data.toDF().collect())
}
}
test("Read/write binary data") {
withOrcFile(BinaryData("test".getBytes(StandardCharsets.UTF_8)) :: Nil) { file =>
val bytes = spark.read.orc(file).head().getAs[Array[Byte]](0)
assert(new String(bytes, StandardCharsets.UTF_8) === "test")
}
}
test("Read/write all types with non-primitive type") {
val data: Seq[AllDataTypesWithNonPrimitiveType] = (0 to 255).map { i =>
AllDataTypesWithNonPrimitiveType(
s"$i", i, i.toLong, i.toFloat, i.toDouble, i.toShort, i.toByte, i % 2 == 0,
0 until i,
(0 until i).map(Option(_).filter(_ % 3 == 0)),
(0 until i).map(i => i -> i.toLong).toMap,
(0 until i).map(i => i -> Option(i.toLong)).toMap + (i -> None),
(0 until i, (i, s"$i")))
}
withOrcFile(data) { file =>
checkAnswer(
spark.read.orc(file),
data.toDF().collect())
}
}
test("Read/write UserDefinedType") {
withTempPath { path =>
val data = Seq((1, new TestUDT.MyDenseVector(Array(0.25, 2.25, 4.25))))
val udtDF = data.toDF("id", "vectors")
udtDF.write.orc(path.getAbsolutePath)
val readBack = spark.read.schema(udtDF.schema).orc(path.getAbsolutePath)
checkAnswer(udtDF, readBack)
}
}
test("Creating case class RDD table") {
val data = (1 to 100).map(i => (i, s"val_$i"))
sparkContext.parallelize(data).toDF().createOrReplaceTempView("t")
withTempView("t") {
checkAnswer(sql("SELECT * FROM t"), data.toDF().collect())
}
}
test("Simple selection form ORC table") {
val data = (1 to 10).map { i =>
Person(s"name_$i", i, (0 to 1).map { m => Contact(s"contact_$m", s"phone_$m") })
}
withOrcTable(data, "t") {
// ppd:
// leaf-0 = (LESS_THAN_EQUALS age 5)
// expr = leaf-0
assert(sql("SELECT name FROM t WHERE age <= 5").count() === 5)
// ppd:
// leaf-0 = (LESS_THAN_EQUALS age 5)
// expr = (not leaf-0)
assertResult(10) {
sql("SELECT name, contacts FROM t where age > 5")
.rdd
.flatMap(_.getAs[Seq[_]]("contacts"))
.count()
}
// ppd:
// leaf-0 = (LESS_THAN_EQUALS age 5)
// leaf-1 = (LESS_THAN age 8)
// expr = (and (not leaf-0) leaf-1)
{
val df = sql("SELECT name, contacts FROM t WHERE age > 5 AND age < 8")
assert(df.count() === 2)
assertResult(4) {
df.rdd.flatMap(_.getAs[Seq[_]]("contacts")).count()
}
}
// ppd:
// leaf-0 = (LESS_THAN age 2)
// leaf-1 = (LESS_THAN_EQUALS age 8)
// expr = (or leaf-0 (not leaf-1))
{
val df = sql("SELECT name, contacts FROM t WHERE age < 2 OR age > 8")
assert(df.count() === 3)
assertResult(6) {
df.rdd.flatMap(_.getAs[Seq[_]]("contacts")).count()
}
}
}
}
test("save and load case class RDD with `None`s as orc") {
val data = (
Option.empty[Int],
Option.empty[Long],
Option.empty[Float],
Option.empty[Double],
Option.empty[Boolean]
) :: Nil
withOrcFile(data) { file =>
checkAnswer(
spark.read.orc(file),
Row(Seq.fill(5)(null): _*))
}
}
test("SPARK-16610: Respect orc.compress (i.e., OrcConf.COMPRESS) when compression is unset") {
// Respect `orc.compress` (i.e., OrcConf.COMPRESS).
withTempPath { file =>
spark.range(0, 10).write
.option(COMPRESS.getAttribute, "ZLIB")
.orc(file.getCanonicalPath)
val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".zlib.orc"))
assert(maybeOrcFile.isDefined)
val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
val conf = OrcFile.readerOptions(new Configuration())
Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
assert("ZLIB" === reader.getCompressionKind.name)
}
}
// `compression` overrides `orc.compress`.
withTempPath { file =>
spark.range(0, 10).write
.option("compression", "ZLIB")
.option(COMPRESS.getAttribute, "SNAPPY")
.orc(file.getCanonicalPath)
val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".zlib.orc"))
assert(maybeOrcFile.isDefined)
val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
val conf = OrcFile.readerOptions(new Configuration())
Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
assert("ZLIB" === reader.getCompressionKind.name)
}
}
}
// Hive supports zlib, snappy and none for Hive 1.2.1.
test("Compression options for writing to an ORC file (SNAPPY, ZLIB and NONE)") {
withTempPath { file =>
spark.range(0, 10).write
.option("compression", "ZLIB")
.orc(file.getCanonicalPath)
val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".zlib.orc"))
assert(maybeOrcFile.isDefined)
val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
val conf = OrcFile.readerOptions(new Configuration())
Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
assert("ZLIB" === reader.getCompressionKind.name)
}
}
withTempPath { file =>
spark.range(0, 10).write
.option("compression", "SNAPPY")
.orc(file.getCanonicalPath)
val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".snappy.orc"))
assert(maybeOrcFile.isDefined)
val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
val conf = OrcFile.readerOptions(new Configuration())
Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
assert("SNAPPY" === reader.getCompressionKind.name)
}
}
withTempPath { file =>
spark.range(0, 10).write
.option("compression", "NONE")
.orc(file.getCanonicalPath)
val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".orc"))
assert(maybeOrcFile.isDefined)
val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
val conf = OrcFile.readerOptions(new Configuration())
Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
assert("NONE" === reader.getCompressionKind.name)
}
}
}
test("simple select queries") {
withOrcTable((0 until 10).map(i => (i, i.toString)), "t") {
checkAnswer(
sql("SELECT `_1` FROM t where t.`_1` > 5"),
(6 until 10).map(Row.apply(_)))
checkAnswer(
sql("SELECT `_1` FROM t as tmp where tmp.`_1` < 5"),
(0 until 5).map(Row.apply(_)))
}
}
test("appending") {
val data = (0 until 10).map(i => (i, i.toString))
spark.createDataFrame(data).toDF("c1", "c2").createOrReplaceTempView("tmp")
withOrcFile(data) { file =>
withTempView("t") {
spark.read.orc(file).createOrReplaceTempView("t")
checkAnswer(spark.table("t"), data.map(Row.fromTuple))
sql("INSERT INTO TABLE t SELECT * FROM tmp")
checkAnswer(spark.table("t"), (data ++ data).map(Row.fromTuple))
}
}
spark.sessionState.catalog.dropTable(
TableIdentifier("tmp"),
ignoreIfNotExists = true,
purge = false)
}
test("overwriting") {
val data = (0 until 10).map(i => (i, i.toString))
spark.createDataFrame(data).toDF("c1", "c2").createOrReplaceTempView("tmp")
withOrcTable(data, "t") {
sql("INSERT OVERWRITE TABLE t SELECT * FROM tmp")
checkAnswer(spark.table("t"), data.map(Row.fromTuple))
}
spark.sessionState.catalog.dropTable(
TableIdentifier("tmp"),
ignoreIfNotExists = true,
purge = false)
}
test("self-join") {
// 4 rows, cells of column 1 of row 2 and row 4 are null
val data = (1 to 4).map { i =>
val maybeInt = if (i % 2 == 0) None else Some(i)
(maybeInt, i.toString)
}
withOrcTable(data, "t") {
val selfJoin = sql("SELECT * FROM t x JOIN t y WHERE x.`_1` = y.`_1`")
val queryOutput = selfJoin.queryExecution.analyzed.output
assertResult(4, "Field count mismatches")(queryOutput.size)
assertResult(2, s"Duplicated expression ID in query plan:\\n $selfJoin") {
queryOutput.filter(_.name == "_1").map(_.exprId).size
}
checkAnswer(selfJoin, List(Row(1, "1", 1, "1"), Row(3, "3", 3, "3")))
}
}
test("nested data - struct with array field") {
val data = (1 to 10).map(i => Tuple1((i, Seq(s"val_$i"))))
withOrcTable(data, "t") {
checkAnswer(sql("SELECT `_1`.`_2`[0] FROM t"), data.map {
case Tuple1((_, Seq(string))) => Row(string)
})
}
}
test("nested data - array of struct") {
val data = (1 to 10).map(i => Tuple1(Seq(i -> s"val_$i")))
withOrcTable(data, "t") {
checkAnswer(sql("SELECT `_1`[0].`_2` FROM t"), data.map {
case Tuple1(Seq((_, string))) => Row(string)
})
}
}
test("columns only referenced by pushed down filters should remain") {
withOrcTable((1 to 10).map(Tuple1.apply), "t") {
checkAnswer(sql("SELECT `_1` FROM t WHERE `_1` < 10"), (1 to 9).map(Row.apply(_)))
}
}
test("SPARK-5309 strings stored using dictionary compression in orc") {
withOrcTable((0 until 1000).map(i => ("same", "run_" + i / 100, 1)), "t") {
checkAnswer(
sql("SELECT `_1`, `_2`, SUM(`_3`) FROM t GROUP BY `_1`, `_2`"),
(0 until 10).map(i => Row("same", "run_" + i, 100)))
checkAnswer(
sql("SELECT `_1`, `_2`, SUM(`_3`) FROM t WHERE `_2` = 'run_5' GROUP BY `_1`, `_2`"),
List(Row("same", "run_5", 100)))
}
}
test("SPARK-9170: Don't implicitly lowercase of user-provided columns") {
withTempPath { dir =>
val path = dir.getCanonicalPath
spark.range(0, 10).select('id as "Acol").write.orc(path)
spark.read.orc(path).schema("Acol")
intercept[IllegalArgumentException] {
spark.read.orc(path).schema("acol")
}
checkAnswer(spark.read.orc(path).select("acol").sort("acol"),
(0 until 10).map(Row(_)))
}
}
test("SPARK-10623 Enable ORC PPD") {
withTempPath { dir =>
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
import testImplicits._
val path = dir.getCanonicalPath
// For field "a", the first column has odds integers. This is to check the filtered count
// when `isNull` is performed. For Field "b", `isNotNull` of ORC file filters rows
// only when all the values are null (maybe this works differently when the data
// or query is complicated). So, simply here a column only having `null` is added.
val data = (0 until 10).map { i =>
val maybeInt = if (i % 2 == 0) None else Some(i)
val nullValue: Option[String] = None
(maybeInt, nullValue)
}
// The data is repartitioned so that several ORC files are produced,
// which lets ORC skip stripes.
spark.createDataFrame(data).toDF("a", "b").repartition(10).write.orc(path)
val df = spark.read.orc(path)
def checkPredicate(pred: Column, answer: Seq[Row]): Unit = {
val sourceDf = stripSparkFilter(df.where(pred))
val data = sourceDf.collect().toSet
val expectedData = answer.toSet
// When a filter is pushed to ORC, ORC can apply it to rows. So we can check
// the number of rows returned from ORC to make sure our filter pushdown works.
// A tricky part is that ORC does not filter rows fully but returns a superset of
// possible results. So this checks that the number of results is less than the original
// count of data, and then checks that it contains the expected data.
assert(
sourceDf.count < 10 && expectedData.subsetOf(data),
s"No data was filtered for predicate: $pred")
}
checkPredicate('a === 5, List(5).map(Row(_, null)))
checkPredicate('a <=> 5, List(5).map(Row(_, null)))
checkPredicate('a < 5, List(1, 3).map(Row(_, null)))
checkPredicate('a <= 5, List(1, 3, 5).map(Row(_, null)))
checkPredicate('a > 5, List(7, 9).map(Row(_, null)))
checkPredicate('a >= 5, List(5, 7, 9).map(Row(_, null)))
checkPredicate('a.isNull, List(null).map(Row(_, null)))
checkPredicate('b.isNotNull, List())
checkPredicate('a.isin(3, 5, 7), List(3, 5, 7).map(Row(_, null)))
checkPredicate('a > 0 && 'a < 3, List(1).map(Row(_, null)))
checkPredicate('a < 1 || 'a > 8, List(9).map(Row(_, null)))
checkPredicate(!('a > 3), List(1, 3).map(Row(_, null)))
checkPredicate(!('a > 0 && 'a < 3), List(3, 5, 7, 9).map(Row(_, null)))
}
}
}
test("SPARK-14962 Produce correct results on array type with isnotnull") {
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
val data = (0 until 10).map(i => Tuple1(Array(i)))
withOrcFile(data) { file =>
val actual = spark
.read
.orc(file)
.where("_1 is not null")
val expected = data.toDF()
checkAnswer(actual, expected)
}
}
}
test("SPARK-15198 Support for pushing down filters for boolean types") {
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
val data = (0 until 10).map(_ => (true, false))
withOrcFile(data) { file =>
val df = spark.read.orc(file).where("_2 == true")
val actual = stripSparkFilter(df).count()
// ORC filter should be applied and the total count should be 0.
assert(actual === 0)
}
}
}
test("Support for pushing down filters for decimal types") {
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
val data = (0 until 10).map(i => Tuple1(BigDecimal.valueOf(i)))
checkPredicatePushDown(spark.createDataFrame(data).toDF("a"), 10, "a == 2")
}
}
test("Support for pushing down filters for timestamp types") {
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
val timeString = "2015-08-20 14:57:00"
val data = (0 until 10).map { i =>
val milliseconds = Timestamp.valueOf(timeString).getTime + i * 3600
Tuple1(new Timestamp(milliseconds))
}
checkPredicatePushDown(spark.createDataFrame(data).toDF("a"), 10, s"a == '$timeString'")
}
}
test("column nullability and comment - write and then read") {
val schema = (new StructType)
.add("cl1", IntegerType, nullable = false, comment = "test")
.add("cl2", IntegerType, nullable = true)
.add("cl3", IntegerType, nullable = true)
val row = Row(3, null, 4)
val df = spark.createDataFrame(sparkContext.parallelize(row :: Nil), schema)
val tableName = "tab"
withTable(tableName) {
df.write.format("orc").mode("overwrite").saveAsTable(tableName)
// Verify the DDL command result: DESCRIBE TABLE
checkAnswer(
sql(s"desc $tableName").select("col_name", "comment").where($"comment" === "test"),
Row("cl1", "test") :: Nil)
// Verify the schema
val expectedFields = schema.fields.map(f => f.copy(nullable = true))
assert(spark.table(tableName).schema == schema.copy(fields = expectedFields))
}
}
test("Empty schema does not read data from ORC file") {
val data = Seq((1, 1), (2, 2))
withOrcFile(data) { path =>
val conf = new Configuration()
conf.set(OrcConf.INCLUDE_COLUMNS.getAttribute, "")
conf.setBoolean("hive.io.file.read.all.columns", false)
val orcRecordReader = {
val file = new File(path).listFiles().find(_.getName.endsWith(".snappy.orc")).head
val split = new FileSplit(new Path(file.toURI), 0, file.length, Array.empty[String])
val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
val oif = new OrcInputFormat[OrcStruct]
oif.createRecordReader(split, hadoopAttemptContext)
}
val recordsIterator = new RecordReaderIterator[OrcStruct](orcRecordReader)
try {
assert(recordsIterator.next().toString == "{null, null}")
} finally {
recordsIterator.close()
}
}
}
test("read from multiple orc input paths") {
val path1 = Utils.createTempDir()
val path2 = Utils.createTempDir()
makeOrcFile((1 to 10).map(Tuple1.apply), path1)
makeOrcFile((1 to 10).map(Tuple1.apply), path2)
val df = spark.read.orc(path1.getCanonicalPath, path2.getCanonicalPath)
assert(df.count() == 20)
}
test("Enabling/disabling ignoreCorruptFiles") {
def testIgnoreCorruptFiles(): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(1).toDF("a").write.orc(new Path(basePath, "first").toString)
spark.range(1, 2).toDF("a").write.orc(new Path(basePath, "second").toString)
spark.range(2, 3).toDF("a").write.json(new Path(basePath, "third").toString)
val df = spark.read.orc(
new Path(basePath, "first").toString,
new Path(basePath, "second").toString,
new Path(basePath, "third").toString)
checkAnswer(df, Seq(Row(0), Row(1)))
}
}
def testIgnoreCorruptFilesWithoutSchemaInfer(): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(1).toDF("a").write.orc(new Path(basePath, "first").toString)
spark.range(1, 2).toDF("a").write.orc(new Path(basePath, "second").toString)
spark.range(2, 3).toDF("a").write.json(new Path(basePath, "third").toString)
val df = spark.read.schema("a long").orc(
new Path(basePath, "first").toString,
new Path(basePath, "second").toString,
new Path(basePath, "third").toString)
checkAnswer(df, Seq(Row(0), Row(1)))
}
}
def testAllCorruptFiles(): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(1).toDF("a").write.json(new Path(basePath, "first").toString)
spark.range(1, 2).toDF("a").write.json(new Path(basePath, "second").toString)
val df = spark.read.orc(
new Path(basePath, "first").toString,
new Path(basePath, "second").toString)
assert(df.count() == 0)
}
}
def testAllCorruptFilesWithoutSchemaInfer(): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(1).toDF("a").write.json(new Path(basePath, "first").toString)
spark.range(1, 2).toDF("a").write.json(new Path(basePath, "second").toString)
val df = spark.read.schema("a long").orc(
new Path(basePath, "first").toString,
new Path(basePath, "second").toString)
assert(df.count() == 0)
}
}
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "true") {
testIgnoreCorruptFiles()
testIgnoreCorruptFilesWithoutSchemaInfer()
val m1 = intercept[AnalysisException] {
testAllCorruptFiles()
}.getMessage
assert(m1.contains("Unable to infer schema for ORC"))
testAllCorruptFilesWithoutSchemaInfer()
}
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "false") {
val m1 = intercept[SparkException] {
testIgnoreCorruptFiles()
}.getMessage
assert(m1.contains("Malformed ORC file"))
val m2 = intercept[SparkException] {
testIgnoreCorruptFilesWithoutSchemaInfer()
}.getMessage
assert(m2.contains("Malformed ORC file"))
val m3 = intercept[SparkException] {
testAllCorruptFiles()
}.getMessage
assert(m3.contains("Could not read footer for file"))
val m4 = intercept[SparkException] {
testAllCorruptFilesWithoutSchemaInfer()
}.getMessage
assert(m4.contains("Malformed ORC file"))
}
}
test("SPARK-27160 Predicate pushdown correctness on DecimalType for ORC") {
withTempPath { dir =>
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
val path = dir.getCanonicalPath
Seq(BigDecimal(0.1), BigDecimal(0.2), BigDecimal(-0.3))
.toDF("x").write.orc(path)
val df = spark.read.orc(path)
checkAnswer(df.filter("x >= 0.1"), Seq(Row(0.1), Row(0.2)))
checkAnswer(df.filter("x > 0.1"), Seq(Row(0.2)))
checkAnswer(df.filter("x <= 0.15"), Seq(Row(0.1), Row(-0.3)))
checkAnswer(df.filter("x < 0.1"), Seq(Row(-0.3)))
checkAnswer(df.filter("x == 0.2"), Seq(Row(0.2)))
}
}
}
}
abstract class OrcQuerySuite extends OrcQueryTest with SharedSparkSession {
import testImplicits._
test("LZO compression options for writing to an ORC file") {
withTempPath { file =>
spark.range(0, 10).write
.option("compression", "LZO")
.orc(file.getCanonicalPath)
val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".lzo.orc"))
assert(maybeOrcFile.isDefined)
val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
val conf = OrcFile.readerOptions(new Configuration())
Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
assert("LZO" === reader.getCompressionKind.name)
}
}
}
test("Schema discovery on empty ORC files") {
// SPARK-8501 is fixed.
withTempPath { dir =>
val path = dir.getCanonicalPath
withTable("empty_orc") {
withTempView("empty", "single") {
spark.sql(
s"""CREATE TABLE empty_orc(key INT, value STRING)
|USING ORC
|LOCATION '${dir.toURI}'
""".stripMargin)
val emptyDF = Seq.empty[(Int, String)].toDF("key", "value").coalesce(1)
emptyDF.createOrReplaceTempView("empty")
// This creates one empty ORC file with the ORC SerDe. We use this trick because
// the Spark SQL ORC data source never writes empty ORC files itself.
spark.sql(
s"""INSERT INTO TABLE empty_orc
|SELECT key, value FROM empty
""".stripMargin)
val df = spark.read.orc(path)
assert(df.schema === emptyDF.schema.asNullable)
checkAnswer(df, emptyDF)
}
}
}
}
test("SPARK-21791 ORC should support column names with dot") {
withTempDir { dir =>
val path = new File(dir, "orc").getCanonicalPath
Seq(Some(1), None).toDF("col.dots").write.orc(path)
assert(spark.read.orc(path).collect().length == 2)
}
}
test("SPARK-25579 ORC PPD should support column names with dot") {
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
checkPredicatePushDown(spark.range(10).toDF("col.dot"), 10, "`col.dot` == 2")
}
}
test("SPARK-20728 Make ORCFileFormat configurable between sql/hive and sql/core") {
withSQLConf(SQLConf.ORC_IMPLEMENTATION.key -> "hive") {
val e = intercept[AnalysisException] {
sql("CREATE TABLE spark_20728(a INT) USING ORC")
}
assert(e.message.contains("Hive built-in ORC data source must be used with Hive support"))
}
withSQLConf(SQLConf.ORC_IMPLEMENTATION.key -> "native") {
withTable("spark_20728") {
sql("CREATE TABLE spark_20728(a INT) USING ORC")
val fileFormat = sql("SELECT * FROM spark_20728").queryExecution.analyzed.collectFirst {
case l: LogicalRelation => l.relation.asInstanceOf[HadoopFsRelation].fileFormat.getClass
}
assert(fileFormat == Some(classOf[OrcFileFormat]))
}
}
}
}
class OrcV1QuerySuite extends OrcQuerySuite {
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_LIST, "orc")
}
class OrcV2QuerySuite extends OrcQuerySuite {
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_LIST, "")
}
| goldmedal/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala | Scala | apache-2.0 | 26,625 |
package com.payalabs.scalajs.react.bridge.elements
import com.payalabs.scalajs.react.bridge.ReactBridgeComponent
import japgolly.scalajs.react.Callback
import scala.scalajs.js
/**
* Bridge to [ReactMediumEditor](https://github.com/wangzuo/react-medium-editor)'s component
*/
case class ReactMediumEditor(id: js.UndefOr[String] = js.undefined, className: js.UndefOr[String] = js.undefined,
ref: js.UndefOr[String] = js.undefined, key: js.UndefOr[Any] = js.undefined,
text: js.UndefOr[String] = js.undefined,
onChange: js.UndefOr[String => Callback] = js.undefined,
options: js.UndefOr[Map[String, Any]] = js.undefined)
extends ReactBridgeComponent
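// Hedged usage sketch (not part of the original file): it only exercises the constructor
// parameters declared above. The property values are illustrative, and the step that turns
// the instance into a rendered element is omitted because it depends on the
// scalajs-react-bridge version in use.
object ReactMediumEditorUsageSketch {
  val editor = ReactMediumEditor(
    text = "Hello, world",
    onChange = (updated: String) => Callback.log(s"editor text changed to: $updated"),
    options = Map("disableReturn" -> true) // passed straight through to medium-editor
  )
}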
| payalabs/scalajs-react-bridge-example | src/main/scala/com/payalabs/scalajs/react/bridge/elements/ReactMediumEditor.scala | Scala | mit | 771 |
package scala.reflect.macros
package contexts
import scala.tools.nsc.reporters.StoreReporter
trait Parsers {
self: Context =>
import global._
def parse(code: String) = {
val sreporter = new StoreReporter()
val oldReporter = global.reporter
try {
global.reporter = sreporter
val parser = newUnitParser(new CompilationUnit(newSourceFile(code, "<macro>")))
val tree = gen.mkTreeOrBlock(parser.parseStatsOrPackages())
sreporter.infos.foreach {
case sreporter.Info(pos, msg, sreporter.ERROR) => throw ParseException(pos, msg)
case _ =>
}
tree
} finally global.reporter = oldReporter
}
}
| felixmulder/scala | src/compiler/scala/reflect/macros/contexts/Parsers.scala | Scala | bsd-3-clause | 661 |
package teststate.external
import teststate.Exports._
case class X1(i: Int)
case class X2(i: Int)
case class X3(i: Int)
object X3 {
implicit def displayX3: Display[X3] = Display("X3=" + _.i)
}
| japgolly/test-state | core/shared/src/test/scala/teststate/external/Xs.scala | Scala | apache-2.0 | 198 |
package scala.offheap
import scala.language.experimental.{macros => canMacro}
import offheap.internal.macros
import offheap.internal.Sanitizer
import offheap.internal.Checked
/** Family of scoped memory allocators. Allocated memory
* is available as long as execution is still in the given
* region scope and is cleaned up once it's done.
*
* A few memory management implementations are available.
* It's possible to pick the desired implementation using
* an implicit instance of `Region.Props`.
*/
trait Region extends Allocator {
protected val id: Long =
if (Checked.MEMORY) Sanitizer.register()
else 0L
protected def checkOpen(): Unit =
if (!isOpen)
throw new IllegalArgumentException(s"$this has already been closed")
protected def wrap(addr: Addr): Addr = {
if (Checked.MEMORY) Sanitizer.pack(this.id, addr)
else addr
}
def isOpen: Boolean
def close(): Unit = {
checkOpen
if (Checked.MEMORY) Sanitizer.unregister(id)
}
def reallocate(addr: Addr, size: Size): Addr =
throw new UnsupportedOperationException
def free(addr: Addr): Unit =
throw new UnsupportedOperationException
}
object Region {
/** Object that contains the configuration information necessary to
* open a region. Used as a way to implicitly define which
* region implementation strategy to pick in a given scope.
*/
trait Props { def open(): Region }
object Props {
def apply(pool: Pool = Pool()) = PoolRegion.Props(pool)
def direct(alloc: Allocator = malloc) = DirectRegion.Props(alloc)
}
def open(implicit props: Props): Region = props.open
def apply[T](f: Region => T)(implicit props: Props): T = macro macros.Region.apply
}
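// Hedged usage sketch (not part of the original file): it composes only the members defined
// above (Region.Props.apply, Region.open, isOpen, close). The pooled Props default and the
// try/finally shape are assumptions about intended use; actual allocations go through the
// inherited Allocator interface and are left out here.
object RegionUsageSketch {
  def withDefaultPool(): Unit = {
    implicit val props: Region.Props = Region.Props() // pool-backed region by default
    val region = Region.open                          // resolves the implicit Props
    try {
      assert(region.isOpen)
      // ... allocate via the inherited Allocator members here ...
    } finally region.close()                          // closes the region; implementations reclaim its memory
  }
}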
| ignasi35/scala-offheap | core/src/main/scala/offheap/Region.scala | Scala | bsd-3-clause | 1,702 |
/******************************************************************************************************************\\
* Rapture Core, version 2.0.0. Copyright 2010-2015 Jon Pretty, Propensive Ltd. *
* *
* The primary distribution site is http://rapture.io/ *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance   *
* with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. *
* *
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed *
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License *
* for the specific language governing permissions and limitations under the License. *
\\******************************************************************************************************************/
package rapture.core
import language.experimental.macros
object AssignedName {
implicit def assignedNameImplicit: AssignedName = macro CoreMacros.assignedNameMacro
}
class AssignedName(val name: String) extends AnyVal
object MethodName {
implicit def assignedMethodNameImplicit: MethodName = macro CoreMacros.assignedMethodNameMacro
}
class MethodName(val name: String) extends AnyVal
trait Cell[T] {
def apply(): T
def update(t: T): Unit
}
object Cell {
def apply[T](get: => T)(set: T => Unit): Cell[T] = new Cell[T] {
def apply() = get
def update(t: T) = set(t)
}
}
object Var {
def apply[T](t: T) = new Cell[T] {
private var value = t
def apply(): T = value
def update(t: T) = value = t
}
}
| utaal/rapture-core | src/core.scala | Scala | apache-2.0 | 2,140 |
/*
* Copyright (C) 2017 Radicalbit
*
* This file is part of flink-JPMML
*
* flink-JPMML is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* flink-JPMML is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with flink-JPMML. If not, see <http://www.gnu.org/licenses/>.
*/
package io.radicalbit.examples.util
import io.radicalbit.examples.model.Utils
import io.radicalbit.examples.sources.ControlSource
import org.apache.flink.api.java.utils.ParameterTool
object DynamicParams {
def fromParameterTool(params: ParameterTool): DynamicParams = {
val outputPath = params.getRequired("output")
val pathsAndIds = retrievePathsAndIds(params.getRequired("models"))
val policy = computeGenPolicy(params.get("gen-policy", "random"))
val availableIdModels = computeAvailableIds(pathsAndIds)
val intervalCheckpoint = params.get("intervalCheckpoint", 1000.toString).toLong
val maxIntervalControlStream = params.get("maxIntervalControlStream", 5000L.toString).toLong
DynamicParams(outputPath, policy, pathsAndIds, availableIdModels, intervalCheckpoint, maxIntervalControlStream)
}
private def retrievePathsAndIds(paths: String) = {
val rawModelsPaths = paths.split(",")
Utils.retrieveMappingIdPath(rawModelsPaths)
}
private def computeGenPolicy(rawPolicy: String) =
rawPolicy match {
case "random" => ControlSource.Random
case "loop" => ControlSource.Loop
case "finite" => ControlSource.Finite
case _ => throw new IllegalArgumentException(s"$rawPolicy is not a recognized generation policy.")
}
private def computeAvailableIds(pathsAndIds: Map[String, String]) =
Utils.retrieveAvailableId(pathsAndIds)
}
case class DynamicParams(outputPath: String,
genPolicy: ControlSource.Mode,
pathAndIds: Map[String, String],
availableIds: Seq[String],
ckpInterval: Long,
ctrlGenInterval: Long)
| francescofrontera/flink-jpmml | flink-jpmml-examples/src/main/scala/io/radicalbit/examples/util/DynamicParams.scala | Scala | agpl-3.0 | 2,465 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frsse2008.boxes
import uk.gov.hmrc.ct.box._
case class ACQ8999(value: Option[Boolean]) extends CtBoxIdentifier(name = "The company was dormant.")
with CtOptionalBoolean
with Input
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frsse2008/boxes/ACQ8999.scala | Scala | apache-2.0 | 824 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.database.memory
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner
import org.apache.openwhisk.core.database.test.behavior.ArtifactStoreBehavior
@RunWith(classOf[JUnitRunner])
class MemoryArtifactStoreTests extends FlatSpec with MemoryArtifactStoreBehaviorBase with ArtifactStoreBehavior
| starpit/openwhisk | tests/src/test/scala/org/apache/openwhisk/core/database/memory/MemoryArtifactStoreTests.scala | Scala | apache-2.0 | 1,177 |
package net.sansa_stack.rdf.flink.io
import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.jena.riot.Lang
import org.scalatest.FunSuite
class FlinkRDFLoadingTests extends FunSuite {
val env = ExecutionEnvironment.getExecutionEnvironment
test("loading N-Triples file into DataSet should match") {
val path = getClass.getResource("/data.nt").getPath
val lang: Lang = Lang.NTRIPLES
val triples = env.rdf(lang)(path)
val cnt = triples.count()
assert(cnt == 106)
}
test("loading N-Quads file into DataSet should match") {
val path = getClass.getResource("/data.nq").getPath
val lang: Lang = Lang.NQUADS
val triples = env.rdf(lang)(path)
val cnt = triples.count()
assert(cnt == 28)
}
test("loading RDF/XML file into DataSet should match") {
val path = getClass.getResource("/data.rdf").getPath
val lang: Lang = Lang.RDFXML
val triples = env.rdf(lang)(path)
val cnt = triples.count()
assert(cnt == 9)
}
}
| SANSA-Stack/Spark-RDF | sansa-rdf-flink/src/test/scala/net/sansa_stack/rdf/flink/io/FlinkRDFLoadingTests.scala | Scala | gpl-3.0 | 1,009 |
/**
* Created on: Feb 22, 2013
*/
package com.tubros.constraints.core.internal
package tree
import Predef.{
any2stringadd => _,
_
}
import scalaz._
import org.junit.runner.RunWith
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import com.tubros.constraints.api._
import com.tubros.constraints.api.problem._
import com.tubros.constraints.api.solver._
import com.tubros.constraints.api.solver.error._
import com.tubros.constraints.core.spi.solver._
/**
* The '''TreeFiniteDomainSolverSpec''' type defines the unit tests which
* certify the
* [[com.tubros.constraints.core.internal.tree.TreeFiniteDomainSolver]] for
* use as a CSP [[com.tubros.constraints.api.solver.Solver]].
*
* @author svickers
*
*/
@RunWith (classOf[JUnitRunner])
class TreeFiniteDomainSolverSpec
extends ProjectSpec
{
/// Class Imports
import scalaz.std.AllInstances._
import algebraic._
import TreeFiniteDomainSolverSpec.PolynomialEquation
/// Testing Collaborators
val rankingPolicy =
ImpactRankingPolicy[Int] () andThen PreferSmallerDomain[Int] ();
"A TreeFiniteDomainSolver" should "be able to be constructed" in
{
val aSolver = new TreeFiniteDomainSolver[Int] (rankingPolicy);
}
it should "produce a cartesian product with no constraints" in
{
val solver = new TreeFiniteDomainSolver[Int] (rankingPolicy);
val domain = FiniteDiscreteDomain (1 to 10);
val answer = solver {
s =>
for {
_ <- s.newVar ('a, domain)
_ <- s.newVar ('b, domain)
_ <- s.newVar ('c, domain)
stream <- s.run[Vector]
} yield stream;
}.valueOr (_ => Stream.empty);
val expected = for {
a <- domain
b <- domain
c <- domain
} yield (a, b, c);
answer should have size (domain.size * domain.size * domain.size);
expected foreach {
case (a, b, c) =>
answer.contains (
List (
Answer ('a, a),
Answer ('b, b),
Answer ('c, c)
)
) shouldBe (true);
}
}
it should "employ constraint propagation" in
{
val problem = Problem (
new PolynomialEquation[Int] {
def apply = 'y @== 'x ** 3
},
new PolynomialEquation[Int] {
def apply = 'x @== 2
}
);
val solver = new TreeFiniteDomainSolver[Int] (rankingPolicy);
val domain = FiniteDiscreteDomain (1 to 1000);
val solution = solver {
s =>
for {
_ <- s.newVar ('x, domain)
_ <- s.newVar ('y, domain)
_ <- s.add (problem)
stream <- s.run[Vector]
} yield stream;
}
solution should be ('right);
solution foreach {
answer =>
answer should not be ('empty);
answer should have size (1);
answer.head shouldBe (Vector (Answer ('x, 2), Answer ('y, 8)));
}
}
it should "detect when no variables are provided" in
{
val problem = Problem (
new PolynomialEquation[Int] {
def apply = 'a > 'b;
}
);
val solver = new TreeFiniteDomainSolver[Int] (rankingPolicy);
val solution = solver {
s =>
for {
_ <- s.add (problem)
stream <- s.run[Vector]
} yield stream;
}
solution should be ('left);
}
it should "detect when variables are referenced but not provided" in
{
val problem = Problem (
new PolynomialEquation[Int] {
def apply = 'a > 'b;
}
);
val solver = new TreeFiniteDomainSolver[Int] (rankingPolicy);
val domain = FiniteDiscreteDomain (1 to 1000);
val solution = solver {
s =>
for {
x <- s.newVar ('x, domain)
ys <- s.newArrayVar ('y, 20, domain)
_ <- s.add (problem)
stream <- s.run[Vector]
} yield stream;
}
solution should be ('left);
}
it should "produce a list of Variables when newArrayVar is called" in
{
val solver = new TreeFiniteDomainSolver[Int] (rankingPolicy);
val domain = FiniteDiscreteDomain (1 to 1000);
val array = solver.newArrayVar ('a, 10, domain).eval (
VariableStore.empty[Int]
);
array should be ('right);
array foreach {
definitions =>
definitions should have size (10);
}
}
it should "allow for Variables having a single value in their domain" in
{
val problem = Problem (
new PolynomialEquation[Int] {
def apply = 'x > 0
},
new PolynomialEquation[Int] {
def apply = 'x < 'y
}
);
val solver = new TreeFiniteDomainSolver[Int] (
ImpactRankingPolicy[Int] ()
);
val singleValue = FiniteDiscreteDomain (Seq (1));
val tenValues = FiniteDiscreteDomain (1 to 10);
val solution = solver {
s =>
for {
_ <- s.newVar ('x, singleValue)
_ <- s.newVar ('y, tenValues)
_ <- s.add (problem)
stream <- s.run[Vector]
} yield stream;
}
solution should be ('right);
solution foreach {
answer =>
answer should not be ('empty);
answer.force should have size (9);
}
}
}
object TreeFiniteDomainSolverSpec
{
/// Class Types
trait PolynomialEquation[T]
extends Equation[T]
with ArithmeticSupport[T]
with PropositionalSupport[T]
with RelationalSupport[T]
}
| osxhacker/smocs | smocs-core/src/test/scala/com/tubros/constraints/core/internal/tree/TreeFiniteDomainSolverSpec.scala | Scala | bsd-3-clause | 4,959 |