| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 5 to 1M | stringlengths 5 to 109 | stringlengths 6 to 208 | stringclasses 1 value | stringclasses 15 values | int64 5 to 1M |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.config
import scala.collection.JavaConverters._
import org.apache.samza.util.Logging
/**
* Note: All new methods are being added to [[org.apache.samza.config.JavaSystemConfig]]
*/
object SystemConfig {
// system config constants
val SYSTEM_PREFIX = JavaSystemConfig.SYSTEM_PREFIX + "%s."
val SYSTEM_FACTORY = JavaSystemConfig.SYSTEM_FACTORY_FORMAT
val CONSUMER_OFFSET_DEFAULT = SYSTEM_PREFIX + "samza.offset.default"
implicit def Config2System(config: Config) = new SystemConfig(config)
}
class SystemConfig(config: Config) extends ScalaMapConfig(config) with Logging {
val javaSystemConfig = new JavaSystemConfig(config)
def getSystemFactory(name: String) = Option(javaSystemConfig.getSystemFactory(name))
def getSystemKeySerde(name: String) = getSystemDefaultStreamProperty(name, StreamConfig.KEY_SERDE)
def getSystemMsgSerde(name: String) = getSystemDefaultStreamProperty(name, StreamConfig.MSG_SERDE)
def getDefaultSystemOffset(systemName: String) = getOption(SystemConfig.CONSUMER_OFFSET_DEFAULT format (systemName))
/**
* Returns a list of all system names from the config file. Useful for
* getting individual systems.
*/
def getSystemNames() = javaSystemConfig.getSystemNames().asScala
private def getSystemDefaultStreamProperty(name: String, property: String) = {
val defaultStreamProperties = javaSystemConfig.getDefaultStreamProperties(name)
val streamDefault = defaultStreamProperties.get(property)
if (!(streamDefault == null || streamDefault.isEmpty)) {
Option(streamDefault)
} else {
getNonEmptyOption((SystemConfig.SYSTEM_PREFIX + property) format name)
}
}
}
| TiVo/samza | samza-core/src/main/scala/org/apache/samza/config/SystemConfig.scala | Scala | apache-2.0 | 2,493 |
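The entry above hinges on the `Config2System` implicit, which lets any `Config` be used as a `SystemConfig`. Below is a minimal usage sketch, assuming a `MapConfig` populated with the standard `systems.<name>.*` keys; the `kafka` system name and the offset value are illustrative, not taken from the file.

```scala
import org.apache.samza.config.{Config, MapConfig}
import org.apache.samza.config.SystemConfig.Config2System
import scala.collection.JavaConverters._

object SystemConfigSketch {
  def main(args: Array[String]): Unit = {
    val config: Config = new MapConfig(Map(
      "systems.kafka.samza.factory"        -> "org.apache.samza.system.kafka.KafkaSystemFactory",
      "systems.kafka.samza.offset.default" -> "oldest"
    ).asJava)

    // Config2System enriches the plain Config with the SystemConfig helpers.
    println(config.getSystemNames())                // expected to include "kafka"
    println(config.getDefaultSystemOffset("kafka")) // Some(oldest)
  }
}
```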
package hello
/** Hello, world! */
object Hello {
def main(args: Array[String]): Unit = {
val dotty: Int | String = "dotty"
println(s"Hello $dotty!")
}
}
| dotty-staging/dotty | sbt-test/sbt-dotty/scaladoc/src/main/scala/hello/Hello.scala | Scala | apache-2.0 | 166 |
/* Try POH
* author: Leonardone @ NEETSDKASU
*/
object Main extends App {
def gs() = scala.io.StdIn.readLine
def gi() = gs.toInt
def gss() = gs.split(' ')
def gis() = gss.map( _.toInt )
def ngt[T](n :Int, f: ()=>T) = (1 to n).map( (_) => f() )
def ngs(n :Int) = ngt(n, gs)
def ngi(n :Int) = ngt(n, gi)
def ngss(n :Int) = ngt(n, gss)
def ngis(n :Int) = ngt(n, gis)
val n = gi
val q = ngis(n)
val m = gi
val p = ngis(m)
for (i <- 0 to n - m)
for (j <- 0 to n - m)
if ( (0 until m).flatMap( (y) => (0 until m).map( (x) => q(i+y)(j+x) == p(y)(x) ) ).reduce( _ & _ ) )
Console.println(List(i, j).mkString(" "))
}
| neetsdkasu/Paiza-POH-MyAnswers | POH7/Megane/Main.scala | Scala | mit | 717 |
package com.twitter.web.dashboard
import com.google.inject.Module
import com.twitter.finatra.http.HttpServer
import com.twitter.finatra.http.filters.CommonFilters
import com.twitter.finatra.http.routing.HttpRouter
import com.twitter.finatra.http.modules.MustacheModule
import com.twitter.web.dashboard.controllers.DashboardController
object ServerMain extends Server
class Server extends HttpServer {
override val name = "dashboard"
override val modules: Seq[Module] = Seq(MustacheModule)
override def configureHttp(router: HttpRouter): Unit = {
router
.filter[CommonFilters]
.add[DashboardController]
}
}
| twitter/finatra | examples/advanced/web-dashboard/src/main/scala/com/twitter/web/dashboard/Server.scala | Scala | apache-2.0 | 635 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.commons.util
import java.nio.charset.Charset
object NonStandardCharsets {
val UTF_32 = Charset.forName("UTF-32")
}
| MykolaB/gatling | gatling-commons/src/main/scala/io/gatling/commons/util/NonStandardCharsets.scala | Scala | apache-2.0 | 757 |
package com.eevolution.context.dictionary.infrastructure.repository
import com.eevolution.context.dictionary.domain.model.SchedulerParameter
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/e-Evolution
* Created by [email protected] , www.e-evolution.com
*/
/**
* Scheduler Parameter Mapping
*/
trait SchedulerParameterMapping {
val querySchedulerParameter = quote {
querySchema[SchedulerParameter]("AD_Scheduler_Para",
_.schedulerId -> "AD_Scheduler_ID",
_.processParameterId -> "AD_process_Para_ID",
_.tenantId -> "AD_Client_ID",
_.organizationId-> "AD_Org_ID",
_.isActive -> "IsActive",
_.created -> "Created",
_.createdBy -> "CreatedBy",
_.updated -> "Updated",
_.updatedBy -> "UpdatedBy",
_.parameterDefault -> "ParameterDefault",
_.description -> "Description",
_.uuid -> "UUID")
}
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/SchedulerParameterMapping.scala | Scala | gpl-3.0 | 1,748 |
package poly.collection
trait RangeGroupQueryable[T] extends RangeMonoidQueryable[T] {
def group: Group[T]
def monoid = group
}
| ctongfei/poly-collection | rangequery/src/main/scala/poly/collection/RangeGroupQueryable.scala | Scala | mit | 136 |
package io.policarp.scala.credstash
import io.policarp.scala.credstash.reader.{ CredValueReader, Readers }
object BaseClient {
type EncryptionContext = Map[String, String]
val EmptyEncryptionContext: EncryptionContext = Map()
val MostRecentVersion = ""
val DefaultCredentialTableName = "credential-store"
val DefaultCharacterEncoding = "UTF-8"
}
trait BaseClient {
import BaseClient._
def as[K](name: String, table: String = DefaultCredentialTableName, version: String = MostRecentVersion, context: Map[String, String] = EmptyEncryptionContext)(implicit reader: CredValueReader[K]): Option[K]
def get(name: String, table: String = DefaultCredentialTableName, version: String = MostRecentVersion, context: Map[String, String] = EmptyEncryptionContext) = {
as[String](name, table, version, context)(Readers.asString)
}
}
trait AmazonClients {
type KmsClient
type DynamoClient
val kmsClient: KmsClient
val dynamoClient: DynamoClient
}
trait EncryptionClients {
val aesEncryption: AESEncryption
}
| kdrakon/scala-credstash | src/main/scala/io/policarp/scala/credstash/BaseClient.scala | Scala | apache-2.0 | 1,037 |
package visceljs
import scala.scalajs.js.URIUtils.encodeURIComponent
import scala.scalajs.js.URIUtils.decodeURIComponent
import viscel.shared.Vid
import visceljs.AppState.ViewState
sealed abstract class AppState(val urlhash: String) {
def transformPos(f: Int => Int) =
this match {
case ViewState(id, pos) => ViewState(id, f(pos))
case other => other
}
def position: Int =
this match {
case ViewState(id, pos) => pos
case _ => 0
}
}
object AppState {
def parse(path: String): AppState = {
val paths = path.substring(1).split("/").toList
paths match {
case Nil | "" :: Nil => IndexState
case encodedId :: Nil =>
val id = Vid.from(decodeURIComponent(encodedId))
FrontState(id)
case encodedId :: posS :: Nil =>
val id = Vid.from(decodeURIComponent(encodedId))
val pos = Integer.parseInt(posS)
ViewState(id, pos - 1)
case _ => IndexState
}
}
case object IndexState extends AppState("")
case class FrontState(id: Vid) extends AppState(encodeURIComponent(id.str))
case class ViewState(id: Vid, pos: Int) extends AppState(s"${encodeURIComponent(id.str)}/${pos + 1}")
}
| rmgk/viscel | code/js/src/main/scala/visceljs/AppState.scala | Scala | agpl-3.0 | 1,252 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt.internal.inc
import java.io.File
import java.net.URLClassLoader
import sbt.io.IO
import sbt.io.syntax._
import sbt.librarymanagement._
import sbt.librarymanagement.ivy._
import sbt.util.Logger
import xsbti.compile.CompilerBridgeProvider
import org.scalatest._
import org.scalatest.matchers.should.Matchers
/**
* Base class for test suites that must be able to fetch and compile the compiler bridge.
*
* This is a very good example of how to instantiate the compiler bridge provider.
*/
abstract class IvyBridgeProviderSpecification
extends flatspec.FixtureAnyFlatSpec
with fixture.TestDataFixture
with Matchers {
def currentBase: File = new File(".")
def currentTarget: File = currentBase / "target" / "ivyhome"
def currentManaged: File = currentBase / "target" / "lib_managed"
def secondaryCacheDirectory: File = file("target").getAbsoluteFile / "zinc-components"
val resolvers = Array(
ZincComponentCompiler.LocalResolver: Resolver,
Resolver.mavenCentral: Resolver,
MavenRepository(
"scala-integration",
"https://scala-ci.typesafe.com/artifactory/scala-integration/"
): Resolver,
)
private def ivyConfiguration(log: Logger) =
getDefaultConfiguration(currentBase, currentTarget, resolvers, log)
def getZincProvider(bridge: ModuleID, targetDir: File, log: Logger): CompilerBridgeProvider = {
val lock = ZincComponentCompiler.getDefaultLock
val secondaryCache = Some(secondaryCacheDirectory)
val componentProvider = ZincComponentCompiler.getDefaultComponentProvider(targetDir)
val manager = new ZincComponentManager(lock, componentProvider, secondaryCache, log)
val dependencyResolution = IvyDependencyResolution(ivyConfiguration(log))
ZincComponentCompiler.interfaceProvider(bridge, manager, dependencyResolution, currentManaged)
}
def getCompilerBridge(
targetDir: File,
log: Logger,
scalaVersion: String,
)(implicit td: TestData): File = {
val zincVersion = td.configMap.get("sbt.zinc.version") match {
case Some(v: String) => v
case _ => throw new IllegalStateException("No zinc version specified")
}
val bridge0 = ZincLmUtil.getDefaultBridgeSourceModule(scalaVersion)
// redefine the compiler bridge version
// using the version of zinc used during testing
// this way when building with zinc as a source dependency
// these specs don't go looking for some SHA-suffixed compiler bridge
val bridge1 = bridge0.withRevision(zincVersion)
val provider = getZincProvider(bridge1, targetDir, log)
val scalaInstance = provider.fetchScalaInstance(scalaVersion, log)
val bridge = provider.fetchCompiledBridge(scalaInstance, log)
scalaInstance.loader.asInstanceOf[URLClassLoader].close()
scalaInstance.loaderLibraryOnly.asInstanceOf[URLClassLoader].close()
val target = targetDir / s"target-bridge-$scalaVersion.jar"
IO.copyFile(bridge, target)
target
}
private def getDefaultConfiguration(
baseDirectory: File,
ivyHome: File,
resolvers0: Array[Resolver],
log: xsbti.Logger,
): InlineIvyConfiguration = {
val resolvers = resolvers0.toVector
val chainResolver = ChainedResolver("zinc-chain", resolvers)
InlineIvyConfiguration()
.withPaths(IvyPaths(baseDirectory, Some(ivyHome)))
.withResolvers(resolvers)
.withModuleConfigurations(Vector(ModuleConfiguration("*", chainResolver)))
.withLock(None)
.withChecksums(Vector.empty)
.withResolutionCacheDir(ivyHome / "resolution-cache")
.withUpdateOptions(UpdateOptions())
.withLog(log)
}
}
| xuwei-k/xsbt | zinc-lm-integration/src/test/scala/sbt/internal/inc/IvyBridgeProviderSpecification.scala | Scala | apache-2.0 | 3,785 |
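The scaladoc above presents this spec as the template for instantiating the compiler bridge provider. A hypothetical subclass might look like the sketch below; the Scala version, the target directory name, and the assumption that the ScalaTest config map carries `sbt.zinc.version` (required by `getCompilerBridge`) are illustrative, not taken from the zinc sources.

```scala
package sbt.internal.inc

import java.io.File
import sbt.util.Logger

// Hypothetical example suite: fetches and compiles a bridge for one Scala version.
// getCompilerBridge throws unless the ScalaTest config map provides "sbt.zinc.version".
class ExampleBridgeFetchSpec extends IvyBridgeProviderSpecification {
  "the bridge provider" should "fetch and compile a bridge jar" in { td =>
    val targetDir = new File(currentTarget, "example-bridge")
    val bridgeJar = getCompilerBridge(targetDir, Logger.Null, "2.12.18")(td)
    bridgeJar.getName should endWith(".jar")
  }
}
```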
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming.sources
import org.apache.spark.sql.{ForeachWriter, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.execution.python.PythonForeachWriter
import org.apache.spark.sql.sources.v2.{DataSourceOptions, SupportsStreamingWrite, Table}
import org.apache.spark.sql.sources.v2.writer.{DataWriter, SupportsTruncate, WriteBuilder, WriterCommitMessage}
import org.apache.spark.sql.sources.v2.writer.streaming.{StreamingDataWriterFactory, StreamingWrite}
import org.apache.spark.sql.types.StructType
/**
* A write-only table for forwarding data into the specified [[ForeachWriter]].
*
* @param writer The [[ForeachWriter]] to process all data.
* @param converter An object to convert internal rows to the target type T. It can be either
* an [[ExpressionEncoder]] or a direct converter function.
* @tparam T The expected type of the sink.
*/
case class ForeachWriterTable[T](
writer: ForeachWriter[T],
converter: Either[ExpressionEncoder[T], InternalRow => T])
extends Table with SupportsStreamingWrite {
override def name(): String = "ForeachSink"
override def schema(): StructType = StructType(Nil)
override def newWriteBuilder(options: DataSourceOptions): WriteBuilder = {
new WriteBuilder with SupportsTruncate {
private var inputSchema: StructType = _
override def withInputDataSchema(schema: StructType): WriteBuilder = {
this.inputSchema = schema
this
}
// Do nothing for truncate. Foreach sink is special that it just forwards all the records to
// ForeachWriter.
override def truncate(): WriteBuilder = this
override def buildForStreaming(): StreamingWrite = {
new StreamingWrite {
override def commit(epochId: Long, messages: Array[WriterCommitMessage]): Unit = {}
override def abort(epochId: Long, messages: Array[WriterCommitMessage]): Unit = {}
override def createStreamingWriterFactory(): StreamingDataWriterFactory = {
val rowConverter: InternalRow => T = converter match {
case Left(enc) =>
val boundEnc = enc.resolveAndBind(
inputSchema.toAttributes,
SparkSession.getActiveSession.get.sessionState.analyzer)
boundEnc.fromRow
case Right(func) =>
func
}
ForeachWriterFactory(writer, rowConverter)
}
}
}
}
}
}
object ForeachWriterTable {
def apply[T](
writer: ForeachWriter[T],
encoder: ExpressionEncoder[T]): ForeachWriterTable[_] = {
writer match {
case pythonWriter: PythonForeachWriter =>
new ForeachWriterTable[UnsafeRow](
pythonWriter, Right((x: InternalRow) => x.asInstanceOf[UnsafeRow]))
case _ =>
new ForeachWriterTable[T](writer, Left(encoder))
}
}
}
case class ForeachWriterFactory[T](
writer: ForeachWriter[T],
rowConverter: InternalRow => T)
extends StreamingDataWriterFactory {
override def createWriter(
partitionId: Int,
taskId: Long,
epochId: Long): ForeachDataWriter[T] = {
new ForeachDataWriter(writer, rowConverter, partitionId, epochId)
}
}
/**
* A [[DataWriter]] which writes data in this partition to a [[ForeachWriter]].
*
* @param writer The [[ForeachWriter]] to process all data.
* @param rowConverter A function which can convert [[InternalRow]] to the required type [[T]]
* @param partitionId
* @param epochId
* @tparam T The type expected by the writer.
*/
class ForeachDataWriter[T](
writer: ForeachWriter[T],
rowConverter: InternalRow => T,
partitionId: Int,
epochId: Long)
extends DataWriter[InternalRow] {
// If open returns false, we should skip writing rows.
private val opened = writer.open(partitionId, epochId)
override def write(record: InternalRow): Unit = {
if (!opened) return
try {
writer.process(rowConverter(record))
} catch {
case t: Throwable =>
writer.close(t)
throw t
}
}
override def commit(): WriterCommitMessage = {
writer.close(null)
ForeachWriterCommitMessage
}
override def abort(): Unit = {}
}
/**
* An empty [[WriterCommitMessage]]. [[ForeachWriter]] implementations have no global coordination.
*/
case object ForeachWriterCommitMessage extends WriterCommitMessage
| yanboliang/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/ForeachWriterTable.scala | Scala | apache-2.0 | 5,362 |
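`ForeachWriterTable` above is the sink that backs `DataStreamWriter.foreach(...)`. The sketch below shows the end-user side of that contract, i.e. a `ForeachWriter` whose `open`/`process`/`close` calls are driven by `ForeachDataWriter`; the socket source, host, and port are illustrative assumptions.

```scala
import org.apache.spark.sql.{ForeachWriter, SparkSession}

object ForeachSinkSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("foreach-sink").getOrCreate()
    import spark.implicits._

    // Assumed source: lines of text from a local socket.
    val lines = spark.readStream.format("socket")
      .option("host", "localhost").option("port", 9999)
      .load().as[String]

    val query = lines.writeStream.foreach(new ForeachWriter[String] {
      // Returning false here makes ForeachDataWriter skip the whole partition/epoch.
      def open(partitionId: Long, epochId: Long): Boolean = true
      def process(value: String): Unit = println(s"record: $value")
      def close(errorOrNull: Throwable): Unit = ()
    }).start()

    query.awaitTermination()
  }
}
```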
package com.jasonnerothin.project
/** Created by IntelliJ IDEA.
* User: jason
* Date: 2/18/14
* Time: 11:12 AM
*
* Copyright [2014] [Jason Nerothin]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/** A projection function that would work for our implementation, but won't be
* very memory efficient.
*/
trait Projection {
/** Turns a bunch of points in 3-D into a co-domain of
* points on the x-axis. This method should be one-to-one, onto
* and idempotent.
* @param points some points
* @return some points on the x-axis
*/
def apply(points: Seq[Point]): Seq[Point] = {
points map {
case pnt: Point => function(pnt)
}
}
/** Turns a bunch of points on the x-axis into a bunch of points
* in 3-D by returning the domain of the apply function. In order to meet
* our larger implementation goals, this method should be one-to-one, onto
* and idempotent.
* @param points a corresponding bunch of points in 3-dimensions
* @return the projection operation's domain
*/
def unapply(points: Seq[Point]): Seq[Point] = {
points map {
case pnt: Point => inverse(pnt)
}
}
/** Maps from a Point defined in 3-dimensions to a point on
* the x-axis. Note that this method is idempotent (can be
* repeated against its own output and result in the same
* output), i.e. function(x) = function(function(x)). It should also
* be one-to-one and onto.
* @param p a point with no y,z components
* @return a point
*/
def function(p: Point): Point = {
Point(p.bigInt)
}
/** A function to get us from 1-dimension back out to 3-dimensions.
* Note that this method should also be idempotent:
* inverse(x) = inverse(inverse(x)). It should also be one-to-one
* and onto.
* @param p a point on the x-axis
* @return a point in 3-dimensions
*/
def inverse(p: Point): Point = {
Point(p.bigInt)
}
}
/** A simple class representing a point in 3-D space.
*/
case class Point(bigInt: BigInt) extends BitTwiddling{
def x = lowInt(bigInt)
def y = middleInt(bigInt)
def z = highInt(bigInt)
}
object Point {
import scala.language.implicitConversions
val tooBig = 2^IntWidth + 1
implicit def xyzToBigInt(arr: Array[Int]) : BigInt = {
require(arr.length == 3)
val x = arr(0)
val y = arr(1)
val z = arr(2)
require( x < tooBig && y < tooBig && z < tooBig )
BigInt.int2bigInt(x) | BigInt.int2bigInt(y) << IntWidth | BigInt.int2bigInt(z) << IntWidth
}
def apply(x:Int, y:Int, z:Int) : Point = {
new Point(Array(x,y,z))
}
}
| jasonnerothin/projectit | src/main/scala/com/jasonnerothin/project/Projection.scala | Scala | apache-2.0 | 3,137 |
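The scaladoc above describes packing a 3-D point into a single `BigInt` and recovering it, with the round trip required to be one-to-one and onto. Below is a standalone sketch of that bit-packing idea, independent of the `BitTwiddling` trait (not shown here); the 21-bit coordinate width is an arbitrary assumption, and each coordinate is shifted by a distinct multiple of the width so the inverse can separate all three values.

```scala
object BitPackSketch {
  val IntWidth = 21                           // assumed coordinate width (values in [0, 2^21))
  private val mask = (BigInt(1) << IntWidth) - 1

  def pack(x: Int, y: Int, z: Int): BigInt =
    BigInt(x) | (BigInt(y) << IntWidth) | (BigInt(z) << (2 * IntWidth))

  def unpack(packed: BigInt): (Int, Int, Int) =
    ((packed & mask).toInt,
     ((packed >> IntWidth) & mask).toInt,
     ((packed >> (2 * IntWidth)) & mask).toInt)

  def main(args: Array[String]): Unit = {
    val p = pack(7, 42, 1000000)
    println(unpack(p))                        // (7,42,1000000)
  }
}
```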
/*
* Copyright (C) 2012 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.plugin.task.statistic
import org.openmole.core.workflow.builder.TaskBuilder
import org.openmole.core.workflow.data._
import org.openmole.core.workflow.task._
import org.openmole.core.workflow.data._
import org.openmole.core.workflow.task._
import scala.collection.mutable.ListBuffer
object StatisticTask {
def apply() = new StatisticTaskBuilder
}
abstract class StatisticTask extends Task {
def statistics: Iterable[(Prototype[Array[Double]], Prototype[Double], StatisticalAggregation[Double])]
override def process(context: Context) =
Context(
statistics.map {
case (sequence, statProto, agg) ⇒ Variable(statProto, agg(context(sequence)))
})
}
| ISCPIF/PSEExperiments | openmole-src/openmole/plugins/org.openmole.plugin.task.statistic/src/main/scala/org/openmole/plugin/task/statistic/StatisticTask.scala | Scala | agpl-3.0 | 1,399 |
package org.renci.blazegraph
import java.io.OutputStream
import org.openrdf.rio.RDFWriter
import org.openrdf.rio.nquads.NQuadsWriter
import org.openrdf.rio.ntriples.NTriplesWriter
import org.openrdf.rio.rdfxml.RDFXMLWriter
import org.openrdf.rio.turtle.TurtleWriter
trait RDFOutputting extends Common {
def createOutputWriter(out: OutputStream): RDFWriter = outformat.getOrElse("turtle").toLowerCase match {
case "turtle" | "ttl" => new TurtleWriter(out)
case "rdfxml" | "rdf-xml" => new RDFXMLWriter(out)
case "ntriples" | "n-triples" => new NTriplesWriter(out)
case "nquads" | "n-quads" => new NQuadsWriter(out)
case other => throw new IllegalArgumentException(s"Invalid RDF output format: $other")
}
}
| balhoff/blazegraph-runner | src/main/scala/org/renci/blazegraph/RDFOutputting.scala | Scala | bsd-3-clause | 768 |
/*
This file is part of Intake24.
Copyright 2015, 2016 Newcastle University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers.system
import controllers.DatabaseErrorHandler
import io.circe.generic.auto._
import javax.inject.Inject
import parsers.{JsonUtils, NotificationScheduleCSVParser}
import play.api.Configuration
import play.api.http.ContentTypes
import play.api.libs.Files
import play.api.mvc._
import security.Intake24RestrictedActionBuilder
import uk.ac.ncl.openlab.intake24.api.data._
import uk.ac.ncl.openlab.intake24.errors.UnexpectedDatabaseError
import uk.ac.ncl.openlab.intake24.services.systemdb.Roles
import uk.ac.ncl.openlab.intake24.services.systemdb.admin._
import uk.ac.ncl.openlab.intake24.services.systemdb.notifications.{NewNotification, NotificationScheduleDataService, RecallNotificationRequest}
import scala.concurrent.{ExecutionContext, Future}
class NotificationAdminController @Inject()(service: NotificationScheduleDataService,
userService: UserAdminService,
configuration: Configuration,
rab: Intake24RestrictedActionBuilder,
playBodyParsers: PlayBodyParsers,
val controllerComponents: ControllerComponents,
implicit val executionContext: ExecutionContext) extends BaseController
with DatabaseErrorHandler with JsonUtils {
private def doCreateOrUpdate(surveyId: String, notifications: Seq[RecallNotificationRequest]): Result = {
val ns = notifications.map { n =>
userService.getUserByEmail(n.userEmail) match {
case Right(u) => Right(NewNotification(u.id, Some(surveyId), n.dateTime, n.notificationType))
case Left(e) => Left(n.userEmail)
}
}
if (ns.exists(_.isLeft)) {
translateDatabaseError(UnexpectedDatabaseError(new Exception(s"Users with the following emails were not found: ${ns.filter(_.isLeft).map(_.left.get) mkString ", "}")))
} else {
translateDatabaseResult(service.batchCreate(ns.map(_.right.get)))
}
}
private def uploadCSV(formData: MultipartFormData[Files.TemporaryFile], surveyId: String): Result = {
if (formData.files.length != 1)
BadRequest(toJsonString(ErrorDescription("BadRequest", s"Expected exactly one file attachment, got ${formData.files.length}"))).as(ContentTypes.JSON)
else {
NotificationScheduleCSVParser.parseFile(formData.files(0).ref.path.toFile) match {
case Right(csvRecords) =>
doCreateOrUpdate(surveyId, csvRecords)
case Left(error) =>
BadRequest(toJsonString(ErrorDescription("InvalidCSV", error)))
}
}
}
def uploadNotificationsCSV(surveyId: String) = rab.restrictToRoles(Roles.superuser, Roles.surveyAdmin, Roles.surveyStaff(surveyId))(playBodyParsers.multipartFormData) {
request =>
Future {
uploadCSV(request.body, surveyId)
}
}
}
| digitalinteraction/intake24 | ApiPlayServer/app/controllers/system/NotificationAdminController.scala | Scala | apache-2.0 | 3,545 |
import scala.concurrent.Future
import init.Init
import org.apache.commons.logging.LogFactory
import play.api._
import play.api.mvc._
import play.api.mvc.Results._
object Global extends GlobalSettings {
override def onHandlerNotFound(request: RequestHeader) = {
Future.successful(NotFound)
}
override def onBadRequest(request: RequestHeader, error: String) = {
Future.successful(BadRequest("Bad Request: " + error))
}
override def onError(request: RequestHeader, ex: Throwable) = {
Logger.error(ex.toString, ex)
Future.successful(InternalServerError(ex.toString))
}
override def onStart(app: Application) {
Init.init()
}
override def onStop(app: Application) {
Init.shutdown()
}
}
| gilt/cave | worker/app/Global.scala | Scala | mit | 731 |
package com.arcusys.valamis.web.servlet
import com.arcusys.valamis.exception.EntityNotFoundException
import com.arcusys.valamis.lrssupport.lrs.service.util.{TinCanVerb, TinCanVerbs}
import com.arcusys.valamis.slide.service.SlideService
import com.arcusys.valamis.uri.model.TincanURIType
import com.arcusys.valamis.uri.service.TincanURIService
import com.arcusys.valamis.web.servlet.base.BaseApiController
import com.arcusys.valamis.web.servlet.base.exceptions.BadRequestException
import com.arcusys.valamis.web.servlet.request.uri.{URIActionType, URIRequest}
/**
* Create and provide URI for TinCan Objects
*/
class URIServlet extends BaseApiController {
lazy val uriService = inject[TincanURIService]
lazy val slideService = inject[SlideService]
get("/uri(/)")(jsonAction {
val uriRequest = URIRequest(this)
uriRequest.action match {
case None =>
val result = uriService.getOrCreate(
uriRequest.prefix,
uriRequest.id,
TincanURIType.withName(uriRequest.objectType.toLowerCase),
uriRequest.content)
result
case Some(URIActionType.GetAll) =>
uriService.getById(
uriRequest.skipTake,
uriRequest.filter)
case _ => throw new BadRequestException
}
})
get("/uri/verbs(/)")(jsonAction {
TinCanVerbs.all.map(x => TinCanVerb(TinCanVerbs.getVerbURI(x), x))
})
get("/uri/:objType/:objName") {
val uri = request.getRequestURL.toString
val result = uriService.getByURI(uri)
if (result.isDefined)
halt(200, result.get.content)
else
throw new EntityNotFoundException("Object not found")
}
}
| arcusys/Valamis | valamis-portlets/src/main/scala/com/arcusys/valamis/web/servlet/UriServlet.scala | Scala | gpl-3.0 | 1,647 |
package org.littlewings.javaee7.batch
import javax.batch.api.{BatchProperty, AbstractBatchlet}
import javax.batch.runtime.context.JobContext
import javax.enterprise.context.Dependent
import javax.inject.{Inject, Named}
import org.jboss.logging.Logger
import org.littlewings.javaee7.service.LanguageService
@Named
@Dependent
class MyBatchlet extends AbstractBatchlet {
@Inject
private var jobContext: JobContext = _
@transient
private val logger: Logger = Logger.getLogger(getClass)
@Inject
@BatchProperty
private var message: String = _
@Inject
@BatchProperty
private var id: String = _
@Inject
private var languageService: LanguageService = _
@throws(classOf[Exception])
override def process(): String = {
logger.infof("***** start process MyBatchlet job. *****")
logger.infof("job name = %s", jobContext.getJobName)
val properties = jobContext.getProperties
logger.infof("properties jobProperty = %s", properties.getProperty("jobProperty"))
logger.infof("batch property message = %s", message)
val language = languageService.findById(id.toLong)
logger.infof("found entity, %s", language)
logger.infof("***** end process MyBatchlet job. *****")
"done."
}
}
| kazuhira-r/javaee7-scala-examples | jbatch-getting-started/src/main/scala/org/littlewings/javaee7/batch/MyBatchlet.scala | Scala | mit | 1,237 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package expr
import com.intellij.lang.ASTNode
import com.intellij.psi.PsiElementVisitor
import org.jetbrains.plugins.scala.lang.psi.api.ScalaElementVisitor
import org.jetbrains.plugins.scala.lang.psi.api.expr._
/**
* @author Alexander.Podkhalyuzin
*/
class ScConstrBlockImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScConstrBlock {
override def toString: String = "ConstructorBlock"
override def accept(visitor: ScalaElementVisitor) {
visitor.visitConstrBlock(this)
}
override def accept(visitor: PsiElementVisitor) {
visitor match {
case s: ScalaElementVisitor => s.visitConstrBlock(this)
case _ => super.accept(visitor)
}
}
}
| LPTK/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/impl/expr/ScConstrBlockImpl.scala | Scala | apache-2.0 | 754 |
package specs.fast
import specs.Spec
import org.json4s.ast.fast._
class JBoolean extends Spec { def is = s2"""
The JBoolean value should
read a Boolean $readBooleanJBoolean
pattern match with JTrue $readBooleanJBooleanPatternMatchJTrue
pattern match with JTrue and fail with scala.MatchError $readBooleanJBooleanPatternMatchJTrueFail
pattern match with JFalse $readBooleanJBooleanPatternMatchJFalse
pattern match with JFalse and fail with scala.MatchError $readBooleanJBooleanPatternMatchJFalseFail
pattern match with JBoolean as true $readBooleanJBooleanPatternMatchJBooleanTrue
pattern match with JBoolean as true and fail with scala.MatchError $readBooleanJBooleanPatternMatchJBooleanTrueFail
pattern match with JBoolean as false $readBooleanJBooleanPatternMatchJBooleanFalse
pattern match with JBoolean as false and fail with scala.MatchError $readBooleanJBooleanPatternMatchJBooleanFalseFail
The JTrue value should
read a Boolean as true $readBooleanJTrue
The JFalse value should
read a Boolean as false $readBooleanJFalse
"""
def readBooleanJBoolean = prop {b: Boolean =>
JBoolean(b).get must beEqualTo(b)
}
def readBooleanJBooleanPatternMatchJBooleanTrue = prop {b: Boolean =>
{b == true} ==> {
val result = JBoolean(b) match {
case f @ JBoolean(true) => f
}
result.get must beEqualTo(b)
}
}
def readBooleanJBooleanPatternMatchJBooleanTrueFail = prop {b: Boolean =>
{b == true} ==> {
{
JBoolean(b) match {
case f @ JBoolean(false) => f
}
} must throwAn[MatchError]
}
}
def readBooleanJBooleanPatternMatchJBooleanFalse = prop {b: Boolean =>
{b == false} ==> {
val result = JBoolean(b) match {
case f @ JBoolean(false) => f
}
result.get must beEqualTo(b)
}
}
def readBooleanJBooleanPatternMatchJBooleanFalseFail = prop {b: Boolean =>
{b == false} ==> {
{
JBoolean(b) match {
case f @ JBoolean(true) => f
}
} must throwAn[MatchError]
}
}
def readBooleanJBooleanPatternMatchJTrue = prop {b: Boolean =>
(b == true) ==> {
val result = JBoolean(b) match {
case f @ JTrue => f
}
result.get must beEqualTo(b)
}
}
def readBooleanJBooleanPatternMatchJTrueFail = prop {b: Boolean =>
(b == true) ==> {
{
JBoolean(b) match {
case f @ JFalse => f
}
} must throwAn[MatchError]
}
}
def readBooleanJBooleanPatternMatchJFalse = prop {b: Boolean =>
(b == false) ==> {
val result = JBoolean(b) match {
case f @ JFalse => f
}
result.get must beEqualTo(b)
}
}
def readBooleanJBooleanPatternMatchJFalseFail = prop {b: Boolean =>
(b == false) ==> {
{
JBoolean(b) match {
case f @ JTrue => f
}
} must throwAn[MatchError]
}
}
def readBooleanJTrue = prop {b: Boolean =>
(b == true) ==> {
JTrue.get must beEqualTo(b)
}
}
def readBooleanJFalse = prop {b: Boolean =>
(b == false) ==> {
JFalse.get must beEqualTo(b)
}
}
}
| json4s/json4s-ast | jvm/src/test/scala/specs/fast/JBoolean.scala | Scala | apache-2.0 | 3,163 |
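The specification text above documents that `JBoolean(b)` can be matched against `JTrue`, `JFalse`, or the `JBoolean` extractor. A small non-test sketch of the same API, assuming only what the spec itself exercises:

```scala
import org.json4s.ast.fast.{JBoolean, JFalse, JTrue}

object JBooleanSketch {
  // JBoolean.apply returns the JTrue/JFalse singletons, as the spec's pattern matches rely on.
  def describe(b: Boolean): String = JBoolean(b) match {
    case JTrue  => "true singleton"
    case JFalse => "false singleton"
  }

  def main(args: Array[String]): Unit = {
    println(describe(true))   // true singleton
    println(describe(false))  // false singleton
  }
}
```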
package com.fuckingaroundwithspray
import akka.actor.{ActorContext, Actor}
import spray.routing._
import spray.http.MediaTypes._
/**
* Actor to hold and run our service. All we have to say now is "Release the
* Actors!" and watch an application magically appear.
*/
class FuckingServiceActor extends Actor with FuckingService {
def actorRefFactory: ActorContext = context
def receive: Actor.Receive = runRoute(fuckingRoute)
}
/**
* Service itself, separated from the actor above. Allows testing without
* initialising an actor framework.
*/
trait FuckingService extends HttpService {
val fuckingRoute = {
path("") {
respondWithMediaType(`text/plain`) {
complete("you get NOTHING!")
}
}
}
}
| jbirch/fuckin-about-with-scala | src/main/scala/com/fuckingaroundwithspray/FuckingService.scala | Scala | bsd-3-clause | 737 |
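The comment above notes that keeping the route in a trait allows testing without initialising an actor framework. A hedged sketch of such a test, assuming spray-testkit and specs2 on the test classpath; the spec name is illustrative.

```scala
import com.fuckingaroundwithspray.FuckingService
import org.specs2.mutable.Specification
import spray.testkit.Specs2RouteTest

class FuckingServiceSpec extends Specification with Specs2RouteTest with FuckingService {
  // RouteTest supplies an ActorSystem, which doubles as the actorRefFactory HttpService needs.
  def actorRefFactory = system

  "the fucking route" should {
    "answer GET / with the expected body" in {
      Get("/") ~> fuckingRoute ~> check {
        responseAs[String] === "you get NOTHING!"
      }
    }
  }
}
```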
/*
* Copyright (c) 2013-2014 Telefónica Investigación y Desarrollo S.A.U.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.tid.cosmos.infinity.server.hadoop
import es.tid.cosmos.infinity.common.fs.{Path, PathMetadata}
import es.tid.cosmos.infinity.common.permissions.PermissionsMask
/** An abstraction for an HDFS name node service. */
trait NameNode extends UserPrivileges {
/** Retrieve the file or directory metadata corresponding to the given path.
*
* @param path The path whose metadata is to be obtained.
* @return a FileMetadata if path contains a file, or DirectoryMetadata if it contains a directory.
*/
@throws[HdfsException.IOError]
@throws[HdfsException.Unauthorized]
@throws[NameNodeException.NoSuchPath]
def pathMetadata(path: Path): PathMetadata
/** Create a new file on the given path.
*
* @param path The path where the new file is to be created
* @param owner The owner of the new file
* @param group The group of the new file
* @param permissions The permissions for the new file
* @param replication The replication factor for the new file
* @param blockSize The block size for the new file
*/
@throws[HdfsException.IOError]
@throws[NameNodeException.PathAlreadyExists]
@throws[NameNodeException.ParentNotDirectory]
@throws[HdfsException.Unauthorized]
@throws[NameNodeException.NoSuchPath]
def createFile(
path: Path,
owner: String,
group: String,
permissions: PermissionsMask,
replication: Option[Short],
blockSize: Option[Long]): Unit
/** Create a new directory on the given path.
*
* @param path The path where the new directory is to be created
* @param owner The owner of the new directory
* @param group The group of the new directory
* @param permissions The permissions for the new directory
* @throws NameNodeException.NoSuchPath if some of the parents in the path doesn't exist
* @throws NameNodeException.PathAlreadyExists if there is already a file or directory on
* the given path
* @throws NameNodeException.ParentNotDirectory if the parent path is not a directory
* @throws NameNodeException.Unauthorized if the current user is not authorized to do the
* action
* @throws NameNodeException.IOError if there is an unexpected IO error
*/
def createDirectory(
path: Path,
owner: String,
group: String,
permissions: PermissionsMask): Unit
/** Delete the file or directory on the given path.
*
* @param path The path of the file or directory to be removed.
* @param recursive Indicates whether contents must be removed recursively in case of directory
* (ignored for files).
* @throws NameNodeException.NoSuchPath if there is no file or directory on the given path
* @throws NameNodeException.Unauthorized if the current user is not authorized to do the
* action
* @throws NameNodeException.IOError if there is an unexpected IO error
*/
def deletePath(path: Path, recursive: Boolean): Unit
/** Move the file or directory located in the given path to a new one.
*
* @param from The path of the file or directory to be moved.
* @param to The path of the new location of the file or directory.
* @throws NameNodeException.NoSuchPath if there is no file or directory on the path
* indicated by `from` argument
* @throws NameNodeException.PathAlreadyExists if there is already a file or directory
* in the path indicated by `to` argument
* @throws NameNodeException.Unauthorized if the current user is not authorized to do the
* action
* @throws NameNodeException.IOError if there is an unexpected IO error
*/
def movePath(from: Path, to: Path): Unit
/** Set the owner of a given file or directory.
*
* @param path The path to the file or directory whose owner is to be set
* @param newOwner The new owner of the file or directory
* @throws NameNodeException.NoSuchPath if there is no file or directory on the given path
* @throws NameNodeException.Unauthorized if the current user is not authorized to do the
* action
* @throws NameNodeException.IOError if there is an unexpected IO error
*/
def setOwner(path: Path, newOwner: String): Unit
/** Set the group of a given file or directory.
*
* @param path The path to the file or directory whose group is to be set
* @param newGroup The new group of the file or directory
* @throws NameNodeException.NoSuchPath if there is no file or directory on the given path
* @throws NameNodeException.Unauthorized if the current user is not authorized to do the
* action
* @throws NameNodeException.IOError if there is an unexpected IO error
*/
def setGroup(path: Path, newGroup: String): Unit
/** Set the permissions for a given file or directory.
*
* @param path The path to the file or directory whose group is to be set
* @param permissions The new permissions of the file or directory
* @throws NameNodeException.NoSuchPath if there is no file or directory on the given path
* @throws NameNodeException.Unauthorized if the current user is not authorized to do the
* action
* @throws NameNodeException.IOError if there is an unexpected IO error
*/
def setPermissions(path: Path, permissions: PermissionsMask): Unit
}
| telefonicaid/fiware-cosmos-platform | infinity/server/src/main/scala/es/tid/cosmos/infinity/server/hadoop/NameNode.scala | Scala | apache-2.0 | 5,983 |
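The trait above documents each operation together with the exceptions it can raise. A hedged client-side sketch against those documented signatures; the helper and its swallow-`PathAlreadyExists` behaviour are illustrative, the concrete `NameNode`, `Path`, and `PermissionsMask` values are assumed to come from the caller, and the sketch assumes `NameNodeException` lives in the same package, as its unqualified use above suggests.

```scala
package es.tid.cosmos.infinity.server.hadoop

import es.tid.cosmos.infinity.common.fs.Path
import es.tid.cosmos.infinity.common.permissions.PermissionsMask

object NameNodeUsageSketch {
  /** Create a directory owned by `user`, treating an already-existing path as success. */
  def ensureDirectory(
      nameNode: NameNode,
      path: Path,
      user: String,
      permissions: PermissionsMask): Unit =
    try nameNode.createDirectory(path, owner = user, group = user, permissions = permissions)
    catch { case _: NameNodeException.PathAlreadyExists => () }
}
```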
package scodec.protocols.mpeg
package transport
import scalaz.{ \/, -\/, \/- }
import \/.{ left, right }
import scalaz.stream.{ Process1, Process }
import scodec.{ Attempt, Codec, DecodeResult, DecodingContext, Err, SizeBound }
import scodec.bits._
import scodec.stream.decode.{ StreamDecoder, many => decodeMany }
import psi.{ Section, SectionHeader, SectionCodec }
/** Supports depacketization of an MPEG transport stream, represented as a stream of `Packet`s. */
object Demultiplexer {
sealed trait Result
case class SectionResult(section: Section) extends Result
case class PesPacketResult(body: PesPacket) extends Result
private sealed trait DecodeState
private case class AwaitingHeader(acc: BitVector) extends DecodeState
private case class AwaitingSectionBody(header: SectionHeader, acc: BitVector) extends DecodeState
private case class AwaitingPesBody(header: PesPacketHeaderPrefix, acc: BitVector) extends DecodeState
/**
* Stream transducer that converts packets in to sections and PES packets.
*
* The packets may span PID values. De-packetization is performed on each PID and as whole messages are received,
* reassembled messages are emitted.
*
* PES packets emitted by this method never include parsed headers -- that is, every emitted PES packet is of
* type `PesPacket.WithoutHeader`. To get PES packets with parsed headers, use `demultiplexWithPesHeaders`.
*
* Errors encountered while depacketizing are emitted.
*
* Upon noticing a PID discontinuity, an error is emitted and PID decoding state is discarded, so any in-progress
* section decoding for that PID is lost.
*/
def demultiplex(sectionCodec: SectionCodec): Process1[Packet, PidStamped[DemultiplexerError \/ Result]] =
demultiplexGeneral(sectionCodec.decodeSection(_)(_), (pph, b) => Attempt.successful(DecodeResult(PesPacket.WithoutHeader(pph.streamId, b), BitVector.empty)))
/** Variant of `demultiplex` that parses PES packet headers. */
def demultiplexWithPesHeaders(sectionCodec: SectionCodec): Process1[Packet, PidStamped[DemultiplexerError \/ Result]] =
demultiplexGeneral(sectionCodec.decodeSection(_)(_), PesPacket.decode)
/** Generic variant of `demultiplex` that allows section and PES decoding to be explicitly specified. */
def demultiplexGeneral(
decodeSectionBody: (SectionHeader, BitVector) => Attempt[DecodeResult[Section]],
decodePesBody: (PesPacketHeaderPrefix, BitVector) => Attempt[DecodeResult[PesPacket]]
): Process1[Packet, PidStamped[DemultiplexerError \/ Result]] = {
type Step = Process1[PidStamped[DemultiplexerError.Discontinuity] \/ Packet, PidStamped[DemultiplexerError \/ Result]]
def pidSpecificErr(pid: Pid, e: DemultiplexerError): PidStamped[DemultiplexerError \/ Result] =
PidStamped(pid, left(e))
def pidSpecificSection(pid: Pid, s: Section): PidStamped[DemultiplexerError \/ Result] =
PidStamped(pid, right(SectionResult(s)))
def pidSpecificPesPacket(pid: Pid, pesPacket: PesPacket): PidStamped[DemultiplexerError \/ Result] =
PidStamped(pid, right(PesPacketResult(pesPacket)))
def nextMessage(state: Map[Pid, DecodeState], pid: Pid, payloadUnitStart: Option[Int], payload: BitVector): Step = {
payloadUnitStart match {
case None => go(state)
case Some(start) =>
val bits = payload.drop(start * 8L)
if (bits.sizeLessThan(16)) {
go(state + (pid -> AwaitingHeader(bits)))
} else {
// Check for PES start code prefix
if (start == 0 && bits.take(16) == hex"0001".bits) {
if (bits.sizeLessThan(40)) {
go(state + (pid -> AwaitingHeader(bits)))
} else {
Codec[PesPacketHeaderPrefix].decode(bits.drop(16)) match {
case Attempt.Successful(DecodeResult(header, bitsPostHeader)) =>
pesBody(state, pid, header, bitsPostHeader, None)
case Attempt.Failure(err) =>
Process.emit(pidSpecificErr(pid, DemultiplexerError.Decoding(err))) ++ go(state - pid)
}
}
} else {
if (bits.sizeLessThan(32)) {
go(state + (pid -> AwaitingHeader(bits)))
} else {
Codec[SectionHeader].decode(bits) match {
case Attempt.Failure(err) =>
Process.emit(pidSpecificErr(pid, DemultiplexerError.Decoding(err))) ++ go(state - pid)
case Attempt.Successful(DecodeResult(header, bitsPostHeader)) =>
sectionBody(state, pid, header, bitsPostHeader)
}
}
}
}
}
}
def pesBody(state: Map[Pid, DecodeState], pid: Pid, header: PesPacketHeaderPrefix, bitsPostHeader: BitVector, packet: Option[Packet]): Step = {
def doDecodePesBody(pesBodyBits: BitVector): Step = {
decodePesBody(header, pesBodyBits) match {
case Attempt.Successful(DecodeResult(pesBody, rest)) =>
Process.emit(pidSpecificPesPacket(pid, pesBody)) ++ go(state - pid)
case Attempt.Failure(err) =>
Process.emit(pidSpecificErr(pid, DemultiplexerError.Decoding(err))) ++ go(state - pid)
}
}
// TODO if header.length is 0, must decode until next packet with payload start indicator
if (header.length == 0L) {
if (packet.map { _.payloadUnitStart.isDefined }.getOrElse(false)) {
doDecodePesBody(bitsPostHeader) ++ handlePacket(state - pid, packet.get)
} else {
go(state + (pid -> AwaitingPesBody(header, bitsPostHeader)))
}
} else {
val neededBits = header.length * 8
if (bitsPostHeader.size < neededBits) {
go(state + (pid -> AwaitingPesBody(header, bitsPostHeader)))
} else {
doDecodePesBody(bitsPostHeader.take(neededBits.toLong)) ++ go(state - pid)
}
}
}
def sectionBody(state: Map[Pid, DecodeState], pid: Pid, header: SectionHeader, bitsPostHeader: BitVector): Step = {
val neededBits = header.length * 8
if (bitsPostHeader.size < neededBits) {
go(state + (pid -> AwaitingSectionBody(header, bitsPostHeader)))
} else {
decodeSectionBody(header, bitsPostHeader) match {
case Attempt.Failure(err) =>
val rest = bitsPostHeader.drop(neededBits.toLong)
Process.emit(pidSpecificErr(pid, DemultiplexerError.Decoding(err))) ++ potentiallyNextSection(state, pid, rest)
case Attempt.Successful(DecodeResult(section, rest)) =>
Process.emit(pidSpecificSection(pid, section)) ++ potentiallyNextSection(state, pid, rest)
}
}
}
def potentiallyNextSection(state: Map[Pid, DecodeState], pid: Pid, payload: BitVector): Step = {
// Peek at table_id -- if we see 0xff, then there are no further sections in this packet
if (payload.size >= 8 && payload.take(8) != BitVector.high(8))
nextMessage(state - pid, pid, Some(0), payload)
else go(state - pid)
}
def handlePacket(state: Map[Pid, DecodeState], packet: Packet): Step = {
val pid = packet.header.pid
packet.payload match {
case None => go(state)
case Some(payload) =>
state.get(packet.header.pid) match {
case None =>
nextMessage(state, packet.header.pid, packet.payloadUnitStart, payload)
case Some(AwaitingHeader(acc)) =>
nextMessage(state, packet.header.pid, Some(0), acc ++ payload)
case Some(AwaitingSectionBody(header, acc)) =>
sectionBody(state, packet.header.pid, header, acc ++ payload)
case Some(AwaitingPesBody(header, acc)) =>
pesBody(state, packet.header.pid, header, acc ++ payload, Some(packet))
}
}
}
def go(state: Map[Pid, DecodeState]): Step =
Process.await1[PidStamped[DemultiplexerError.Discontinuity] \/ Packet].flatMap {
case -\/(discontinuity) =>
Process.emit(pidSpecificErr(discontinuity.pid, discontinuity.value)) ++ go(state - discontinuity.pid)
case \/-(packet) =>
handlePacket(state, packet)
}
Packet.validateContinuity pipe go(Map.empty)
}
/** Provides a stream decoder that decodes a bitstream of 188 byte MPEG packets in to a stream of messages. */
def packetStreamDecoder(sectionCodec: SectionCodec): StreamDecoder[PidStamped[DemultiplexerError \/ Result]] = decodeMany[Packet] pipe demultiplex(sectionCodec)
}
| jrudnick/scodec-protocols | src/main/scala/scodec/protocols/mpeg/transport/Demultiplexer.scala | Scala | bsd-3-clause | 8,597 |
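The scaladoc above describes the demultiplexer as a `Process1` transducer from packets to PID-stamped results or errors. A hedged wiring sketch in the same scalaz-stream style; the `SectionCodec` is assumed to be built elsewhere, and the helper simply drops the error side of the disjunction.

```scala
package scodec.protocols.mpeg
package transport

import scalaz.\/-
import scalaz.stream.Process
import psi.SectionCodec

object DemuxSketch {
  /** Pipe raw packets through the demultiplexer and keep only successfully reassembled results. */
  def successfulResults(
      packets: Process[Nothing, Packet],
      sectionCodec: SectionCodec
  ): Process[Nothing, (Pid, Demultiplexer.Result)] =
    packets
      .pipe(Demultiplexer.demultiplex(sectionCodec))
      .collect { case PidStamped(pid, \/-(result)) => pid -> result } // errors discarded
}
```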
package fix
package v0_8_0
import com.spotify.scio.bigquery._
import com.spotify.scio.jdbc._
import com.spotify.scio.jdbc.syntax.JdbcScioContextOps
import com.spotify.scio.transforms.BaseAsyncLookupDoFn
object Ops {
import java.util.Date
import com.spotify.scio.jdbc.{CloudSqlOptions, JdbcConnectionOptions, JdbcReadOptions}
def apply(date: Date,
sc: JdbcScioContextOps,
cloudSqlOptions: CloudSqlOptions) = ()
def handleResponse(name: String, value: BaseAsyncLookupDoFn.Try[String]): Unit = ???
}
| spotify/scio | scalafix/output-0_8/src/main/scala/fix/FixSyntaxImports.scala | Scala | apache-2.0 | 532 |
/*
,i::,
:;;;;;;;
;:,,::;.
1ft1;::;1tL
t1;::;1,
:;::; _____ __ ___ __
fCLff ;:: tfLLC / ___/ / |/ /____ _ _____ / /_
CLft11 :,, i1tffLi \__ \ ____ / /|_/ // __ `// ___// __ \
1t1i .;; .1tf ___/ //___// / / // /_/ // /__ / / / /
CLt1i :,: .1tfL. /____/ /_/ /_/ \__,_/ \___//_/ /_/
Lft1,:;: , 1tfL:
;it1i ,,,:::;;;::1tti s_mach.concurrent
.t1i .,::;;; ;1tt Copyright (c) 2017 S-Mach, Inc.
Lft11ii;::;ii1tfL: Author: [email protected]
.L1 1tt1ttt,,Li
...1LLLL...
*/
package s_mach.concurrent.impl
import scala.language.higherKinds
import scala.collection.generic.CanBuildFrom
import scala.concurrent.{ExecutionContext, Future}
import s_mach.concurrent.config._
trait AbstractCollectionAsyncTaskRunner[
A,
M[+AA] <: TraversableOnce[AA],
MDT <: AbstractCollectionAsyncTaskRunner[A,M,MDT]
] extends AbstractAsyncConfigBuilder[MDT] {
def input: M[A]
def optTotal = if(input.hasDefiniteSize) {
Some(input.size)
} else {
None
}
}
/**
* A case class for a serial asynchronous task runner that is configurable with
* optional progress reporting, throttling and/or failure retry.
* @param input the input collection
* @param optProgress optional progress reporting settings
* @param optRetry optional failure retry settings
* @param optThrottle optional throttle settings
* @tparam A the input type
* @tparam M the collection type
*/
case class CollectionAsyncTaskRunner[A,M[+AA] <: TraversableOnce[AA]](
input: M[A],
optProgress: Option[ProgressConfig] = None,
optRetry: Option[RetryConfig] = None,
optThrottle: Option[ThrottleConfig] = None
) extends AbstractCollectionAsyncTaskRunner[
A,
M,
CollectionAsyncTaskRunner[A,M]
] {
val workerCount = 1
def using(
optProgress: Option[ProgressConfig] = optProgress,
optRetry: Option[RetryConfig] = optRetry,
optThrottle: Option[ThrottleConfig] = optThrottle
) = copy(
optProgress = optProgress,
optRetry = optRetry,
optThrottle = optThrottle
)
/** @return a parallel async task runner configured with a copy of all
* settings */
def par = ParCollectionAsyncTaskRunner[A,M](
input = input,
optProgress = optProgress,
optRetry = optRetry,
optThrottle = optThrottle
)
/**
* @return a parallel async task runner configured to run with workerCount
* workers and with a copy of all settings */
def par(workerCount: Int) = ParCollectionAsyncTaskRunner[A,M](
input = input,
workerCount = workerCount,
optProgress = optProgress,
optRetry = optRetry,
optThrottle = optThrottle
)
import SeriallyOps._
@inline def map[B](f: A => Future[B])(implicit
cbf: CanBuildFrom[Nothing, B, M[B]],
ec: ExecutionContext
) : Future[M[B]] = {
AsyncTaskRunner(this).runTask1(input, mapSerially[A,B,M], f)
}
@inline def flatMap[B](f: A => Future[TraversableOnce[B]])(implicit
cbf: CanBuildFrom[Nothing, B, M[B]],
ec: ExecutionContext
) : Future[M[B]] = {
AsyncTaskRunner(this).runTask1(input, flatMapSerially[A,B,M], f)
}
@inline def foreach[U](f: A => Future[U])(implicit
ec: ExecutionContext
) : Future[Unit] = {
AsyncTaskRunner(this).runTask1(input, foreachSerially[A,U,M], f)
}
@inline def foldLeft[B](z:B)(f: (B,A) => Future[B])(implicit
cbf: CanBuildFrom[Nothing, B, M[B]],
ec: ExecutionContext
) : Future[B] = {
val fSwap = { (a:A,b:B) => f(b,a) }
AsyncTaskRunner(this).runTask2[A,B,B,M,B](
input,
foldLeftSerially[A,B,M](z),
fSwap
)
}
}
/**
* A case class for a parallel asynchronous task runner that is configurable
* with optional progress reporting, throttling and/or failure retry.
* @param input the input collection
* @param optProgress optional progress reporting settings
* @param optRetry optional failure retry settings
* @param optThrottle optional throttle settings
* @tparam A the input type
* @tparam M the collection type
*/
case class ParCollectionAsyncTaskRunner[
A,
M[+AA] <: TraversableOnce[AA]
](
input: M[A],
workerCount: Int = AsyncConfig.DEFAULT_PAR_WORKER_COUNT,
optProgress: Option[ProgressConfig] = None,
optRetry: Option[RetryConfig] = None,
optThrottle: Option[ThrottleConfig] = None
) extends AbstractCollectionAsyncTaskRunner[
A,
M,
ParCollectionAsyncTaskRunner[A,M]
] {
def using(
optProgress: Option[ProgressConfig] = optProgress,
optRetry: Option[RetryConfig] = optRetry,
optThrottle: Option[ThrottleConfig] = optThrottle
) = copy(
optProgress = optProgress,
optRetry = optRetry,
optThrottle = optThrottle
)
import WorkersOps._
@inline def map[B](f: A => Future[B])(implicit
cbf: CanBuildFrom[Nothing, B, M[B]],
ec: ExecutionContext
) : Future[M[B]] = {
AsyncTaskRunner(this).runTask1(
input,
mapWorkers[A,B,M](workerCount),
f
)
}
@inline def flatMap[B](f: A => Future[TraversableOnce[B]])(implicit
cbf: CanBuildFrom[Nothing, B, M[B]],
ec: ExecutionContext
) : Future[M[B]] = {
AsyncTaskRunner(this).runTask1(
input,
flatMapWorkers[A,B,M](workerCount),
f
)
}
@inline def foreach[U](f: A => Future[U])(implicit
ec: ExecutionContext
) : Future[Unit] = {
AsyncTaskRunner(this).runTask1(
input,
foreachWorkers[A,U,M](workerCount),
f
)
}
}
| S-Mach/s_mach.concurrent | src/main/scala/s_mach/concurrent/impl/CollectionAsyncTaskRunner.scala | Scala | mit | 5,628 |
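The scaladoc above explains that the serial and parallel runners expose the same map/flatMap/foreach surface and differ only in worker count plus the optional progress, retry, and throttle settings. A hedged usage sketch built directly on the case classes in this file; the library's richer syntax layer is deliberately not used so the example assumes nothing beyond what is shown above.

```scala
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import s_mach.concurrent.impl.CollectionAsyncTaskRunner

object AsyncRunnerSketch {
  def slowDouble(i: Int): Future[Int] = Future { Thread.sleep(10); i * 2 }

  def main(args: Array[String]): Unit = {
    // Serial runner: one asynchronous task in flight at a time.
    val serial = CollectionAsyncTaskRunner(List(1, 2, 3, 4)).map(slowDouble)
    // Parallel runner: up to two tasks in flight at a time.
    val parallel = CollectionAsyncTaskRunner(List(1, 2, 3, 4)).par(2).map(slowDouble)

    println(Await.result(serial, 10.seconds))   // List(2, 4, 6, 8)
    println(Await.result(parallel, 10.seconds)) // same elements, computed by two workers
  }
}
```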
package breeze.optimize
import breeze.linalg.norm
import breeze.math.{MutableEnumeratedCoordinateField, MutableFiniteCoordinateField, NormedModule}
import breeze.optimize.FirstOrderMinimizer.ConvergenceCheck
import breeze.stats.distributions.{RandBasis, ThreadLocalRandomGenerator}
import breeze.util.Implicits._
import breeze.util.SerializableLogging
import org.apache.commons.math3.random.MersenneTwister
import breeze.macros._
import scala.collection.mutable.ArrayBuffer
/**
*
* @author dlwh
*/
abstract class FirstOrderMinimizer[T, DF <: StochasticDiffFunction[T]](val convergenceCheck: ConvergenceCheck[T])(
implicit space: NormedModule[T, Double])
extends Minimizer[T, DF]
with SerializableLogging {
def this(maxIter: Int = -1, tolerance: Double = 1E-6, fvalMemory: Int = 100, relativeTolerance: Boolean = true)(
implicit space: NormedModule[T, Double]) =
this(FirstOrderMinimizer.defaultConvergenceCheck[T](maxIter, tolerance, relativeTolerance, fvalMemory))
/**
* Any history the derived minimization function needs to do its updates. typically an approximation
* to the second derivative/hessian matrix.
*/
type History
type State = FirstOrderMinimizer.State[T, convergenceCheck.Info, History]
import space.normImpl
protected def initialHistory(f: DF, init: T): History
protected def adjustFunction(f: DF): DF = f
protected def adjust(newX: T, newGrad: T, newVal: Double): (Double, T) = (newVal, newGrad)
protected def chooseDescentDirection(state: State, f: DF): T
protected def determineStepSize(state: State, f: DF, direction: T): Double
protected def takeStep(state: State, dir: T, stepSize: Double): T
protected def updateHistory(newX: T, newGrad: T, newVal: Double, f: DF, oldState: State): History
protected def initialState(f: DF, init: T): State = {
val x = init
val history = initialHistory(f, init)
val (value, grad) = calculateObjective(f, x, history)
val (adjValue, adjGrad) = adjust(x, grad, value)
FirstOrderMinimizer.State(x, value, grad, adjValue, adjGrad, 0, adjValue, history, convergenceCheck.initialInfo)
}
protected def calculateObjective(f: DF, x: T, history: History): (Double, T) = {
f.calculate(x)
}
def infiniteIterations(f: DF, state: State): Iterator[State] = {
var failedOnce = false
val adjustedFun = adjustFunction(f)
Iterator.iterate(state) { state =>
try {
val dir = chooseDescentDirection(state, adjustedFun)
val stepSize = determineStepSize(state, adjustedFun, dir)
logger.info(f"Step Size: $stepSize%.4g")
val x = takeStep(state, dir, stepSize)
val (value, grad) = calculateObjective(adjustedFun, x, state.history)
val (adjValue, adjGrad) = adjust(x, grad, value)
val oneOffImprovement = (state.adjustedValue - adjValue) / (state.adjustedValue.abs
.max(adjValue.abs)
.max(1E-6 * state.initialAdjVal.abs))
logger.info(f"Val and Grad Norm: $adjValue%.6g (rel: $oneOffImprovement%.3g) ${norm(adjGrad)}%.6g")
val history = updateHistory(x, grad, value, adjustedFun, state)
val newCInfo = convergenceCheck.update(x, adjGrad, adjValue, state, state.convergenceInfo)
failedOnce = false
FirstOrderMinimizer.State(
x,
value,
grad,
adjValue,
adjGrad,
state.iter + 1,
state.initialAdjVal,
history,
newCInfo)
} catch {
case x: FirstOrderException if !failedOnce =>
failedOnce = true
logger.error("Failure! Resetting history: " + x)
state.copy(history = initialHistory(adjustedFun, state.x))
case x: FirstOrderException =>
logger.error("Failure again! Giving up and returning. Maybe the objective is just poorly behaved?")
state.copy(searchFailed = true)
}
}
}
def iterations(f: DF, init: T): Iterator[State] = {
val adjustedFun = adjustFunction(f)
infiniteIterations(f, initialState(adjustedFun, init)).takeUpToWhere { s =>
convergenceCheck.apply(s, s.convergenceInfo) match {
case Some(converged) =>
logger.info(s"Converged because ${converged.reason}")
s.convergenceReason = Some(converged)
true
case None =>
false
}
}
}
def minimize(f: DF, init: T): T = {
minimizeAndReturnState(f, init).x
}
def minimizeAndReturnState(f: DF, init: T): State = {
iterations(f, init).last
}
}
sealed class FirstOrderException(msg: String = "") extends RuntimeException(msg)
class NaNHistory extends FirstOrderException
class StepSizeUnderflow extends FirstOrderException
class StepSizeOverflow extends FirstOrderException
class LineSearchFailed(gradNorm: Double, dirNorm: Double)
extends FirstOrderException("Grad norm: %.4f Dir Norm: %.4f".format(gradNorm, dirNorm))
object FirstOrderMinimizer {
/**
* Tracks the information about the optimizer, including the current point, its value, gradient, and then any history.
* Also includes information for checking convergence.
* @param x the current point being considered
* @param value f(x)
* @param grad f.gradientAt(x)
* @param adjustedValue f(x) + r(x), where r is any regularization added to the objective. For LBFGS, this is f(x).
* @param adjustedGradient f'(x) + r'(x), where r is any regularization added to the objective. For LBFGS, this is f'(x).
* @param iter what iteration number we are on.
* @param initialAdjVal f(x_0) + r(x_0), used for checking convergence
* @param history any information needed by the optimizer to do updates.
* @param searchFailed did the line search fail?
* @param convergenceReason the convergence reason
*/
case class State[+T, +ConvergenceInfo, +History](
x: T,
value: Double,
grad: T,
adjustedValue: Double,
adjustedGradient: T,
iter: Int,
initialAdjVal: Double,
history: History,
convergenceInfo: ConvergenceInfo,
searchFailed: Boolean = false,
var convergenceReason: Option[ConvergenceReason] = None) {}
trait ConvergenceCheck[T] {
type Info
def initialInfo: Info
def apply(state: State[T, _, _], info: Info): Option[ConvergenceReason]
def update(newX: T, newGrad: T, newVal: Double, oldState: State[T, _, _], oldInfo: Info): Info
def ||(otherCheck: ConvergenceCheck[T]): ConvergenceCheck[T] = orElse(otherCheck)
def orElse(other: ConvergenceCheck[T]): ConvergenceCheck[T] = {
SequenceConvergenceCheck(asChecks ++ other.asChecks)
}
protected def asChecks: IndexedSeq[ConvergenceCheck[T]] = IndexedSeq(this)
}
object ConvergenceCheck {
implicit def fromPartialFunction[T](pf: PartialFunction[State[T, _, _], ConvergenceReason]): ConvergenceCheck[T] =
new ConvergenceCheck[T] {
override type Info = Unit
def update(newX: T, newGrad: T, newVal: Double, oldState: State[T, _, _], oldInfo: Info): Info = oldInfo
override def apply(state: State[T, _, _], info: Info): Option[ConvergenceReason] = pf.lift(state)
override def initialInfo: Info = ()
}
}
case class SequenceConvergenceCheck[T](checks: IndexedSeq[ConvergenceCheck[T]]) extends ConvergenceCheck[T] {
type Info = IndexedSeq[ConvergenceCheck[T]#Info]
override def initialInfo: IndexedSeq[ConvergenceCheck[T]#Info] = checks.map(_.initialInfo: ConvergenceCheck[T]#Info)
override def update(newX: T, newGrad: T, newVal: Double, oldState: State[T, _, _], oldInfo: Info): Info = {
require(oldInfo.length == checks.length)
val out = ArrayBuffer[ConvergenceCheck[T]#Info]()
cforRange(0 until checks.length) { j =>
val c = checks(j)
val i = oldInfo(j)
out += c.update(newX, newGrad, newVal, oldState, i.asInstanceOf[c.Info])
}
out.toIndexedSeq
}
override def apply(state: State[T, _, _], info: IndexedSeq[ConvergenceCheck[T]#Info]): Option[ConvergenceReason] = {
checks.zip(info).iterator.flatMap { case (c, i) => c(state, i.asInstanceOf[c.Info]) }.toStream.headOption
}
}
trait ConvergenceReason {
def reason: String
}
case object MaxIterations extends ConvergenceReason {
override def reason: String = "max iterations reached"
}
case object FunctionValuesConverged extends ConvergenceReason {
override def reason: String = "function values converged"
}
case object GradientConverged extends ConvergenceReason {
override def reason: String = "gradient converged"
}
case object SearchFailed extends ConvergenceReason {
override def reason: String = "line search failed!"
}
case object MonitorFunctionNotImproving extends ConvergenceReason {
override def reason: String = "monitor function is not improving"
}
case object ProjectedStepConverged extends ConvergenceReason {
override def reason: String = "projected step converged"
}
def maxIterationsReached[T](maxIter: Int): ConvergenceCheck[T] = ConvergenceCheck.fromPartialFunction {
case s: State[_, _, _] if (s.iter >= maxIter && maxIter >= 0) =>
MaxIterations
}
def functionValuesConverged[T](
tolerance: Double = 1E-9,
relative: Boolean = true,
historyLength: Int = 10): ConvergenceCheck[T] = {
new FunctionValuesConverged[T](tolerance, relative, historyLength)
}
case class FunctionValuesConverged[T](tolerance: Double, relative: Boolean, historyLength: Int)
extends ConvergenceCheck[T] {
override type Info = IndexedSeq[Double]
override def update(newX: T, newGrad: T, newVal: Double, oldState: State[T, _, _], oldInfo: Info): Info = {
(oldInfo :+ newVal).takeRight(historyLength)
}
override def apply(state: State[T, _, _], info: IndexedSeq[Double]): Option[ConvergenceReason] = {
if (info.length >= 2 && (state.adjustedValue - info.max).abs <= tolerance * (if (relative) state.initialAdjVal.abs
else 1.0)) {
Some(FunctionValuesConverged)
} else {
None
}
}
override def initialInfo: Info = IndexedSeq(Double.PositiveInfinity)
}
def gradientConverged[T](tolerance: Double, relative: Boolean = true)(
implicit space: NormedModule[T, Double]): ConvergenceCheck[T] = {
import space.normImpl
ConvergenceCheck.fromPartialFunction[T] {
case s: State[T, _, _]
if norm(s.adjustedGradient) <= math.max(tolerance * (if (relative) s.adjustedValue.abs else 1.0), 1E-8) =>
GradientConverged
}
}
def searchFailed[T]: ConvergenceCheck[T] = ConvergenceCheck.fromPartialFunction {
case s: State[_, _, _] if (s.searchFailed) =>
SearchFailed
}
/**
   * Runs the function, and if it fails to decrease by at least improvementRequirement numFailures times in a row,
   * then we abort
   * @param f the external function to evaluate
   * @param numFailures how many consecutive insufficient improvements are tolerated before aborting
   * @param improvementRequirement the minimum relative improvement required at each evaluation
   * @param evalFrequency how often we run the evaluation
   * @tparam T the type of the point being optimized
*/
def monitorFunctionValues[T](
f: T => Double,
numFailures: Int = 5,
improvementRequirement: Double = 1E-2,
evalFrequency: Int = 10): ConvergenceCheck[T] =
new MonitorFunctionValuesCheck(f, numFailures, improvementRequirement, evalFrequency)
case class MonitorFunctionValuesCheck[T](
f: T => Double,
numFailures: Int,
improvementRequirement: Double,
evalFrequency: Int)
extends ConvergenceCheck[T]
with SerializableLogging {
case class Info(bestValue: Double, numFailures: Int)
override def update(newX: T, newGrad: T, newVal: Double, oldState: State[T, _, _], oldInfo: Info): Info = {
if (oldState.iter % evalFrequency == 0) {
val newValue = f(newX)
if (newValue <= oldInfo.bestValue * (1 - improvementRequirement)) {
logger.info(f"External function improved: current ${newValue}%.3f old: ${oldInfo.bestValue}%.3f")
Info(numFailures = 0, bestValue = newValue)
} else {
logger.info(
f"External function failed to improve sufficiently! current ${newValue}%.3f old: ${oldInfo.bestValue}%.3f")
oldInfo.copy(numFailures = oldInfo.numFailures + 1)
}
} else {
oldInfo
}
}
override def apply(state: State[T, _, _], info: Info): Option[ConvergenceReason] = {
if (info.numFailures >= numFailures) {
Some(MonitorFunctionNotImproving)
} else {
None
}
}
override def initialInfo: Info = Info(Double.PositiveInfinity, 0)
}
def defaultConvergenceCheck[T](maxIter: Int, tolerance: Double, relative: Boolean = true, fvalMemory: Int = 20)(
implicit space: NormedModule[T, Double]): ConvergenceCheck[T] =
(
maxIterationsReached[T](maxIter) ||
functionValuesConverged(tolerance, relative, fvalMemory) ||
gradientConverged[T](tolerance, relative) ||
searchFailed
)
/**
* OptParams is a Configuration-compatible case class that can be used to select optimization
* routines at runtime.
*
* Configurations:
* 1) useStochastic=false,useL1=false: LBFGS with L2 regularization
* 2) useStochastic=false,useL1=true: OWLQN with L1 regularization
* 3) useStochastic=true,useL1=false: AdaptiveGradientDescent with L2 regularization
   * 4) useStochastic=true,useL1=true: AdaptiveGradientDescent with L1 regularization
*
*
* @param batchSize size of batches to use if useStochastic and you give a BatchDiffFunction
* @param regularization regularization constant to use.
* @param alpha rate of change to use, only applies to SGD.
   * @param maxIterations how many iterations to do.
* @param useL1 if true, use L1 regularization. Otherwise, use L2.
* @param tolerance convergence tolerance, looking at both average improvement and the norm of the gradient.
* @param useStochastic if false, use LBFGS or OWLQN. If true, use some variant of Stochastic Gradient Descent.
*/
case class OptParams(
batchSize: Int = 512,
regularization: Double = 0.0,
alpha: Double = 0.5,
maxIterations: Int = 1000,
useL1: Boolean = false,
tolerance: Double = 1E-5,
useStochastic: Boolean = false,
randomSeed: Int = 0) {
private implicit val random: RandBasis = RandBasis.withSeed(randomSeed)
def minimize[T](f: BatchDiffFunction[T], init: T)(implicit space: MutableFiniteCoordinateField[T, _, Double]): T = {
this.iterations(f, init).last.x
}
def minimize[T](f: DiffFunction[T], init: T)(implicit space: MutableEnumeratedCoordinateField[T, _, Double]): T = {
this.iterations(f, init).last.x
}
def iterations[T](f: BatchDiffFunction[T], init: T)(implicit space: MutableFiniteCoordinateField[T, _, Double])
: Iterator[FirstOrderMinimizer[T, BatchDiffFunction[T]]#State] = {
val it = if (useStochastic) {
this.iterations(f.withRandomBatches(batchSize), init)(space)
} else {
iterations(f: DiffFunction[T], init)
}
it.asInstanceOf[Iterator[FirstOrderMinimizer[T, BatchDiffFunction[T]]#State]]
}
def iterations[T](f: StochasticDiffFunction[T], init: T)(implicit space: MutableFiniteCoordinateField[T, _, Double])
: Iterator[FirstOrderMinimizer.State[T, _, _]] = {
val r: StochasticGradientDescent[T] = if (useL1) {
new AdaptiveGradientDescent.L1Regularization[T](regularization, eta = alpha, maxIter = maxIterations)(
space,
random)
} else { // L2
new AdaptiveGradientDescent.L2Regularization[T](regularization, alpha, maxIterations)(space, random)
}
r.iterations(f, init)
}
def iterations[T, K](f: DiffFunction[T], init: T)(
implicit space: MutableEnumeratedCoordinateField[T, K, Double]): Iterator[LBFGS[T]#State] = {
if (useL1) new OWLQN[K, T](maxIterations, 5, regularization, tolerance)(space).iterations(f, init)
else
(new LBFGS[T](maxIterations, 5, tolerance = tolerance)(space))
.iterations(DiffFunction.withL2Regularization(f, regularization), init)
}
}
}
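// --- Usage sketch (editor's addition, not part of the original Breeze source) ---
// A minimal, hedged example of driving a FirstOrderMinimizer subclass (LBFGS) with the
// default convergence check: minimize f(x) = ||x - 3||^2 over a dense vector.
// Assumes breeze.linalg and breeze.optimize are available on the classpath.
object FirstOrderMinimizerUsageSketch {
  import breeze.linalg.DenseVector
  import breeze.optimize.{DiffFunction, LBFGS}

  def main(args: Array[String]): Unit = {
    // Objective and gradient: f(x) = (x - 3) . (x - 3), grad f(x) = 2 * (x - 3)
    val f = new DiffFunction[DenseVector[Double]] {
      def calculate(x: DenseVector[Double]): (Double, DenseVector[Double]) = {
        val diff = x - DenseVector.fill(x.length)(3.0)
        (diff.dot(diff), diff * 2.0)
      }
    }
    // Arguments: max iterations, LBFGS memory, convergence tolerance
    val lbfgs = new LBFGS[DenseVector[Double]](100, 5, 1e-9)
    val xMin = lbfgs.minimize(f, DenseVector.zeros[Double](5))
    println(xMin) // expected to be close to DenseVector(3.0, 3.0, 3.0, 3.0, 3.0)
  }
}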
|
scalanlp/breeze
|
math/src/main/scala/breeze/optimize/FirstOrderMinimizer.scala
|
Scala
|
apache-2.0
| 16,156 |
package eu.inn.binders.naming
class UppercaseConverter extends Converter {
def convert(identifier: String): String = identifier.toUpperCase
}
|
InnovaCo/binders
|
src/main/scala/eu/inn/binders/naming/UppercaseConverter.scala
|
Scala
|
bsd-3-clause
| 145 |
import stainless.lang._
import stainless.annotation._
import stainless.collection._
object StateMachine {
sealed trait StateMachine {
type State
type Letter
def initial: State
def next(s: State, l: Letter): Option[State]
def isFinal(s: State): Boolean
@extern @pure
final def isEmpty: Boolean = ???
@extern @pure
final def isTotal: Boolean = ???
final def contains(w0: List[Letter]): Boolean = {
def rec(s: State, w: List[Letter]): Boolean = w match {
case Nil() => isFinal(s)
case Cons(l, ls) => next(s, l) match {
case Some(s1) => rec(s1, ls)
case None() => false
}
}
rec(initial, w0)
}
@extern @pure
final def subsetOf(other: StateMachine): Boolean = ???
}
sealed abstract class MyState
case object A extends MyState
case object B extends MyState
sealed abstract class MyLetter
case object L1 extends MyLetter
case object L2 extends MyLetter
// Recognizes the language: (L1* L2 L2)* L2
case object MyMachine extends StateMachine {
type State = MyState
type Letter = MyLetter
def initial = A
def isFinal(s: State) = { s == B }
def next(s: State, l: Letter): Option[State] = (s, l) match {
case (A, L1) => Some(A)
case (A, L2) => Some(B)
case (B, L2) => Some(A)
case _ => None()
}
}
def tests() = {
// assert(!MyMachine.isEmpty)
// assert(!MyMachine.isTotal)
assert(MyMachine.contains(List(L1, L1, L2, L2, L1, L2)))
assert(!MyMachine.contains(List(L1, L1, L2, L1, L2)))
// assert(MyMachine.contains(List(L1, L2)))
}
}
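// --- Plain-Scala illustration (editor's addition; compile it separately from this stainless
// file, since the stainless imports above shadow the standard List/Option) ---
// The same acceptance check as MyMachine.contains: fold the word through the transition
// function starting from the initial state, then test finality of the resulting state.
object PlainStateMachineSketch {
  sealed trait PState
  case object PA extends PState
  case object PB extends PState
  sealed trait PLetter
  case object PL1 extends PLetter
  case object PL2 extends PLetter

  def next(s: PState, l: PLetter): Option[PState] = (s, l) match {
    case (PA, PL1) => Some(PA)
    case (PA, PL2) => Some(PB)
    case (PB, PL2) => Some(PA)
    case _         => None
  }

  // Accepts iff folding the word from the initial state PA ends in the final state PB
  def contains(w: List[PLetter]): Boolean =
    w.foldLeft(Option[PState](PA))((acc, l) => acc.flatMap(next(_, l))).contains(PB)

  def main(args: Array[String]): Unit = {
    assert(contains(List(PL1, PL1, PL2, PL2, PL1, PL2)))  // mirrors the accepted word above
    assert(!contains(List(PL1, PL1, PL2, PL1, PL2)))      // mirrors the rejected word above
  }
}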
|
epfl-lara/stainless
|
frontends/benchmarks/verification/valid/StateMachine.scala
|
Scala
|
apache-2.0
| 1,637 |
/**
* Copyright (C) 2015 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fr
trait FormRunnerWizard extends FormRunnerBaseOps {
//@XPathFunction
def isWizardValidate =
formRunnerProperty("oxf.xforms.xbl.fr.wizard.validate")(FormRunnerParams()) contains "true"
}
|
ajw625/orbeon-forms
|
src/main/scala/org/orbeon/oxf/fr/FormRunnerWizard.scala
|
Scala
|
lgpl-2.1
| 877 |
package com.kuwalla.app.service.dao
import com.kuwalla.app.dto.User
/**
* Created by roei.azar on 23/06/2017
*/
trait UserDao {
def get(name: String): User
def update(userDto: User): Boolean
}
|
roei/kuwalla-app
|
kuwalla-app-core/src/main/java/com/kuwalla/app/service/dao/UserDao.scala
|
Scala
|
mit
| 205 |
package org.scalacheck.ops
package time
import org.scalacheck.Gen
import org.scalatest.freespec.AnyFreeSpec
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks._
import java.time.{Instant, LocalDateTime}
import java.time.temporal.{ChronoField, ChronoUnit}
class TruncatedJavaTimeSpec extends AnyFreeSpec {
"Gen[Instant]" - {
val it = "Gen[Instant]"
s"$it.truncatedToMillis removes all nanoseconds and milliseconds" in {
forAll(Gen.javaInstant.beforeNow.truncatedToMillis) { (instant: Instant) =>
assertResult(0)(instant.getLong(ChronoField.NANO_OF_SECOND) % 1000)
}
}
}
"Gen[LocalDateTime]" - {
val it = "Gen[LocalDateTime]"
s"$it.truncatedToMillis removes all nanoseconds and milliseconds" in {
forAll(Gen.javaLocalDateTime.beforeNow.truncatedToMillis) { (datetime: LocalDateTime) =>
assertResult(0)(datetime.getLong(ChronoField.NANO_OF_SECOND) % 1000)
}
}
s"$it.truncatedTo(ChronoUnit.SECONDS) removes all milliseconds" in {
forAll(Gen.javaLocalDateTime.beforeNow.truncatedTo(ChronoUnit.SECONDS)) { (datetime: LocalDateTime) =>
assertResult(0)(datetime.getLong(ChronoField.MILLI_OF_SECOND))
}
}
s"$it.truncatedTo(ChronoUnit.MINUTES) removes all seconds" in {
forAll(Gen.javaLocalDateTime.beforeNow.truncatedTo(ChronoUnit.MINUTES)) { (datetime: LocalDateTime) =>
assertResult(0)(datetime.getLong(ChronoField.SECOND_OF_MINUTE))
}
}
s"$it.truncatedTo(ChronoUnit.HOURS) removes all seconds" in {
forAll(Gen.javaLocalDateTime.beforeNow.truncatedTo(ChronoUnit.HOURS)) { (datetime: LocalDateTime) =>
assertResult(0)(datetime.getLong(ChronoField.MINUTE_OF_HOUR))
}
}
}
}
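// --- Illustration (editor's addition, not part of the original spec) ---
// What the generator assertions above rely on: java.time truncation zeroes out every
// field smaller than the given unit.
object TruncationSketch {
  import java.time.LocalDateTime
  import java.time.temporal.ChronoUnit

  def main(args: Array[String]): Unit = {
    val t = LocalDateTime.of(2020, 1, 2, 3, 4, 5, 678900000)
    println(t.truncatedTo(ChronoUnit.MILLIS))  // 2020-01-02T03:04:05.678
    println(t.truncatedTo(ChronoUnit.SECONDS)) // 2020-01-02T03:04:05
    println(t.truncatedTo(ChronoUnit.MINUTES)) // 2020-01-02T03:04
    println(t.truncatedTo(ChronoUnit.HOURS))   // 2020-01-02T03:00
  }
}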
|
gloriousfutureio/scalacheck-ops
|
core/src/test/scala/org/scalacheck/ops/time/TruncatedJavaTimeSpec.scala
|
Scala
|
apache-2.0
| 1,740 |
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License")
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.kayenta.judge.detectors
abstract class BaseOutlierDetector{
/**
* Determine which data points are outliers
* @param data array of samples
* @return boolean array indicating which data points are anomalies
*/
def detect(data: Array[Double]): Array[Boolean]
}
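// --- Example subclass (editor's addition; hypothetical, not one of Kayenta's detectors) ---
// Shows how the abstract contract above is meant to be implemented: flag points whose
// absolute z-score exceeds a fixed threshold.
class SimpleZScoreDetector(threshold: Double = 3.0) extends BaseOutlierDetector {
  override def detect(data: Array[Double]): Array[Boolean] = {
    if (data.isEmpty) {
      Array.empty[Boolean]
    } else {
      val mean = data.sum / data.length
      val std = math.sqrt(data.map(x => (x - mean) * (x - mean)).sum / data.length)
      // With zero variance nothing can be an outlier under this rule
      if (std == 0.0) data.map(_ => false)
      else data.map(x => math.abs(x - mean) / std > threshold)
    }
  }
}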
|
spinnaker/kayenta
|
kayenta-judge/src/main/scala/com/netflix/kayenta/judge/detectors/BaseOutlierDetector.scala
|
Scala
|
apache-2.0
| 895 |
package club.diybio.bank.templates
import scalacss.Defaults._
import scalacss.ScalatagsCss._
import scalatags.Text._
import scalatags.Text.all._
object Index {
lazy val items = for(e <- 0 until 5) yield a(`class` :="item",i(`class`:="lab icon"),s"Item #$e") //some test data
lazy val HEAD = head(
title := "Plasmids bank",
link(rel := "stylesheet", href := "styles/mystyles.css"), //my scalacss styles
script(`type` := "text/javascript", src := "lib/jquery/jquery.min.js"),
link(rel := "stylesheet", href := "lib/selectize.js/css/selectize.css"),
link(rel := "stylesheet", href := "lib/selectize.js/css/selectize.default.css"),
script(`type` := "text/javascript", src := "lib/selectize.js/js/standalone/selectize.js"), //for nice select boxes
link(rel := "stylesheet", href := "lib/Semantic-UI/semantic.css"),
script(`type` := "text/javascript", src := "lib/Semantic-UI/semantic.js") //http://semantic-ui.com/ is nice alternative to bootstrap
)
lazy val MENU = div(`class` := "ui labeled green icon menu",items )
lazy val MAIN = div(`class` := "ui green segment",h1("Hello World!"),
p(id:="hello",`class`:="desc","here will be hello world!")
)
lazy val scripts = Seq(
script(src:="resources/frontend-fastopt.js"),
script(src:="resources/frontend-launcher.js")
)
lazy val BODY = body( MENU, MAIN, div(scripts )
)
lazy val HTML = html(HEAD,BODY)
lazy val template = HTML.render
}
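// --- Usage sketch (editor's addition): write the rendered page to disk ---
// `Index.template` above is just a String of HTML, so saving or serving it is trivial.
object RenderIndexSketch {
  import java.nio.charset.StandardCharsets
  import java.nio.file.{Files, Paths}

  def main(args: Array[String]): Unit = {
    Files.write(Paths.get("index.html"), Index.template.getBytes(StandardCharsets.UTF_8))
  }
}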
|
antonkulaga/plasmid-bank
|
backend/src/main/scala/club/diybio/bank/templates/Index.scala
|
Scala
|
mpl-2.0
| 1,461 |
import org.apache.log4j.{Level, Logger}
import org.apache.spark.graphx._
import org.apache.spark.graphx.util.GraphGenerators
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import scala.collection.immutable.Map
object ClusterWildV03Dimitris {
def main(args: Array[String]) = {
Logger.getLogger("org").setLevel(Level.WARN)
Logger.getLogger("akka").setLevel(Level.WARN)
val sc = new SparkContext()
val argmap : Map[String,String] = args.map { a =>
val argPair = a.split("=")
val name = argPair(0).toLowerCase
val value = argPair(1)
(name, value)
}.toMap
val graphType : String = argmap.getOrElse("graphtype", "rmat").toString.toLowerCase
val rMatNumEdges : Int = argmap.getOrElse("rmatnumedges", 100000000).toString.toInt
val path : String = argmap.getOrElse("path", "graphs/astro.edges").toString
val numPartitions : Int = argmap.getOrElse("numpartitions", 640).toString.toInt
/*
var graph: Graph[Int, Int] = GraphGenerators.rmatGraph(sc, requestedNumVertices = 1e8.toInt, numEdges = 1e8.toInt).mapVertices( (id, _) => -100.toInt )
// val path = "/Users/dimitris/Documents/graphs/astro.txt"
// val numPartitions = 4
// val graph: Graph[(Int), Int] = GraphLoader.edgeListFile(sc, path, false, numPartitions)
*/
val graph: Graph[(Int), Int] =
if (graphType == "rmat")
GraphGenerators.rmatGraph(sc, requestedNumVertices = rMatNumEdges.toInt, numEdges = rMatNumEdges.toInt).mapVertices( (id, _) => -100.toInt )
else
GraphLoader.edgeListFile(sc, path, false, numPartitions)
System.out.println(s"Graph has ${graph.vertices.count} vertices (${graph.vertices.partitions.length} partitions), ${graph.edges.count} edges (${graph.edges.partitions.length} partitions)")
//The following is needed for undirected (bi-directional edge) graphs
val vertexRDDs: VertexRDD[Int] = graph.vertices
var edgeRDDs: RDD[Edge[Int]] = graph.edges.reverse.union(graph.edges)
// val graph: Graph[(Int), Int] = Graph(vertexRDDs,edgeRDDs).mapVertices( (id, _) => -100.toInt )
var unclusterGraph: Graph[(Int), Int] = Graph(vertexRDDs, edgeRDDs).mapVertices((id, _) => -100.toInt)
var prevUnclusterGraph: Graph[(Int), Int] = null
val epsilon: Double = 2
var x: Int = 1
var clusterUpdates: RDD[(org.apache.spark.graphx.VertexId, Int)] = null
var randomSet: RDD[(org.apache.spark.graphx.VertexId, Int)] = null
var newVertices: RDD[(org.apache.spark.graphx.VertexId, Int)] = null
var numNewCenters: Long = 0
var maxDegree: VertexRDD[Int] = unclusterGraph.aggregateMessages[Int](
triplet => {
if (triplet.dstAttr == -100 & triplet.srcAttr == -100) {
triplet.sendToDst(1)
}
}, _ + _).cache()
var maxDeg: Int = if (maxDegree.count == 0) 0 else maxDegree.map(x => x._2).reduce((a, b) => math.max(a, b))
while (maxDeg >= 1) {
val time0 = System.currentTimeMillis
numNewCenters = 0
while (numNewCenters == 0) {
randomSet = unclusterGraph.vertices.filter(v => v._2 == -100).sample(false, math.min(epsilon / maxDeg, 1), scala.util.Random.nextInt(1000))
numNewCenters = randomSet.count
}
// System.out.println(s"Cluster Centers ${randomSet.collect().toList}.")
// prevUnclusterGraph = unclusterGraph
unclusterGraph = unclusterGraph.joinVertices(randomSet)((vId, attr, active) => -1).cache()
// prevUnclusterGraph.vertices.unpersist(false)
// prevUnclusterGraph.edges.unpersist(false)
clusterUpdates = unclusterGraph.aggregateMessages[Int](
triplet => {
if (triplet.dstAttr == -100 & triplet.srcAttr == -1) {
triplet.sendToDst(triplet.srcId.toInt)
}
}, math.min(_, _)
)
newVertices = unclusterGraph.vertices.leftJoin(clusterUpdates) {
(id, oldValue, newValue) =>
newValue match {
case Some(x: Int) => x
case None => {
if (oldValue == -1) id.toInt; else oldValue;
}
}
}
// prevUnclusterGraph = unclusterGraph
unclusterGraph = unclusterGraph.joinVertices(newVertices)((vId, oldAttr, newAttr) => newAttr).cache()
// prevUnclusterGraph.vertices.unpersist(false)
// prevUnclusterGraph.edges.unpersist(false)
maxDegree = unclusterGraph.aggregateMessages[Int](
triplet => {
if (triplet.dstAttr == -100 & triplet.srcAttr == -100) {
triplet.sendToDst(1)
}
}, _ + _
).cache()
maxDeg = if (maxDegree.count == 0) 0 else maxDegree.map(x => x._2).reduce((a, b) => math.max(a, b))
// System.out.println(s"new maxDegree $maxDeg.")
// System.out.println(s"ClusterWild! finished iteration $x.")
val time1 = System.currentTimeMillis
System.out.println(
s"$x\\t" +
s"$maxDeg\\t" +
s"$numNewCenters\\t" +
s"${time1 - time0}\\t" +
"")
x = x + 1
}
//Take care of degree 0 nodes
val time10 = System.currentTimeMillis
newVertices = unclusterGraph.subgraph(vpred = (vId, clusterID) => clusterID == -100).vertices
numNewCenters = newVertices.count
newVertices = unclusterGraph.vertices.leftJoin(newVertices) {
(id, oldValue, newValue) =>
newValue match {
case Some(x: Int) => id.toInt
case None => oldValue;
}
}
unclusterGraph = unclusterGraph.joinVertices(newVertices)((vId, oldAttr, newAttr) => newAttr).cache()
unclusterGraph.edges.foreachPartition(x => {}) // also materializes rankGraph.vertices
val time11 = System.currentTimeMillis
System.out.println(
s"$x\\t" +
s"$maxDeg\\t" +
s"${newVertices}\\t" +
s"${time11 - time10}\\t" +
"<end>")
// unclusterGraph = unclusterGraph.mapVertices((id,clusterID) => v == 1)
// unclusterGraph.vertices.collect
// // unhappy edges accross clusters
// val unhappyFriends: Float = unclusterGraph.triplets.filter(e=> e.dstAttr != e.srcAttr).count/2
// // compute cluster sizes
// val clusterSizes: List[Float] = unclusterGraph.vertices.map(v=> v._2).countByValue.map(v => v._2).toList.map(_.toFloat)
// // compute missing edges inside clusters
// val tripletsWithSameID: Float = unclusterGraph.triplets.filter(e=> e.dstAttr == e.srcAttr).count/2
// //Cost
// val costClusterWild = (clusterSizes.map( x=> x*(x-1)/2).sum - tripletsWithSameID) + unhappyFriends
}
}
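// --- Standalone sketch (editor's addition): the degree-counting primitive in isolation ---
// The job above repeatedly computes per-vertex degrees of the not-yet-clustered subgraph
// with aggregateMessages; this simplified local example shows just that primitive
// (without the -100 "unclustered" filter) on a tiny three-edge graph.
object DegreeCountSketch {
  import org.apache.spark.{SparkConf, SparkContext}
  import org.apache.spark.graphx.{Edge, Graph, VertexRDD}

  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local[1]").setAppName("degree-sketch"))
    val edges = sc.parallelize(Seq(Edge(1L, 2L, 1), Edge(2L, 3L, 1), Edge(3L, 1L, 1)))
    val graph: Graph[Int, Int] = Graph.fromEdges(edges, defaultValue = -100)
    // Send 1 along every edge and sum per destination vertex: the in-degree
    val inDegrees: VertexRDD[Int] = graph.aggregateMessages[Int](ctx => ctx.sendToDst(1), _ + _)
    inDegrees.collect.foreach(println) // each vertex has in-degree 1, e.g. (2,1)
    sc.stop()
  }
}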
|
anadim/clusterWild
|
src/main/scalaPre20150324/ClusterWildV03Dimitris.scala
|
Scala
|
apache-2.0
| 6,578 |
/**
* The MIT License (MIT)
* <p/>
* Copyright (c) 2016 ScalateKids
* <p/>
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* <p/>
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* <p/>
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* <p/>
* @author Scalatekids
* @version 1.0
* @since 1.0
*/
package com.actorbase.driver.data
import com.actorbase.driver.ActorbaseDriver.Connection
import com.actorbase.driver.client.Connector
import com.actorbase.driver.client.api.RestMethods._
import com.actorbase.driver.client.api.RestMethods.Status._
import com.actorbase.driver.exceptions._
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.json4s.jackson.Serialization
import org.json4s.JsonDSL._
import java.io.{FileOutputStream, File, PrintWriter}
import scala.collection.immutable.TreeMap
import scala.collection.generic.FilterMonadic
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{ Await, Future }
import scala.concurrent.duration.Duration
case class SingleResponse(response: Any)
/**
* Class representing a single collection of the database at the current state,
 * that is, its state at the moment the query is issued. It
* contains key-value pairs representing items of the collection and exposes
* some utility methods to navigate and modify the contents in an easier way,
* reflecting all changes directly to the remote counterpart.
*/
case class ActorbaseCollection
(val owner: String, var collectionName: String,
var contributors: Map[String, Boolean] = Map.empty[String, Boolean],
var data: TreeMap[String, Any] = new TreeMap[String, Any]())(implicit val conn: Connection, implicit val scheme: String)
extends Connector {
implicit val formats = DefaultFormats
/**
* default scheme + uri to access server side actorbase
*/
val uri: String = scheme + conn.address + ":" + conn.port
/**
   * Insert an arbitrary number of key-value tuples into the collection,
   * reflecting local changes to the remote collection on the server side
*
* @param kv a vararg Tuple2 of type (String, Any)
* @return an object of type ActorbaseCollection representing the collection updated
   * @throws WrongCredentialsExc in case of wrong username or password, or non-existent ones
* @throws InternalErrorExc in case of internal server error
* @throws UndefinedCollectionExc in case of undefined collection
* @throws DuplicateKeyExc in case of duplicate key
*/
@throws(classOf[WrongCredentialsExc])
@throws(classOf[InternalErrorExc])
@throws(classOf[UndefinedCollectionExc])
@throws(classOf[DuplicateKeyExc])
def insert(kv: (String, Any)*): ActorbaseCollection = {
if(kv.length > 1)
asyncInsert(kv:_*)
else {
for ((k, v) <- kv) {
val response = requestBuilder
.withCredentials(conn.username, conn.password)
.withUrl(uri + "/collections/" + collectionName + "/" + k)
.withBody(serialize(v))
.addHeaders("owner" -> toBase64FromString(owner))
.withMethod(POST).send()
response.statusCode match {
case Unauthorized | Forbidden => throw WrongCredentialsExc("Credentials privilege level does not meet criteria needed to perform this operation")
case Error => throw InternalErrorExc("There was an internal server error, something wrong happened")
case BadRequest => throw InternalErrorExc("Invalid or malformed request")
case OK =>
response.body map { x =>
x.asInstanceOf[String] match {
case "UndefinedCollection" => throw UndefinedCollectionExc("Undefined collection")
case "DuplicatedKey" => throw DuplicateKeyExc("Inserting duplicate key")
case "NoPrivileges" => throw WrongCredentialsExc("Insufficient permissions")
case "InvalidChar" => throw InternalErrorExc("Invalid or malformed request")
case _ => data += (k -> v)
}
}
case _ =>
}
}
ActorbaseCollection(owner, collectionName, contributors, data)
}
}
/**
   * Service method: performs the inserts asynchronously when more than one key-value pair is given
*
* @param kv a vararg Tuple2 of type (String, Any)
* @return an object of type ActorbaseCollection representing the collection updated
   * @throws WrongCredentialsExc in case of wrong username or password, or non-existent ones
* @throws InternalErrorExc in case of internal server error
* @throws UndefinedCollectionExc in case of undefined collection
* @throws DuplicateKeyExc in case of duplicate key
*/
@throws(classOf[WrongCredentialsExc])
@throws(classOf[InternalErrorExc])
@throws(classOf[UndefinedCollectionExc])
@throws(classOf[DuplicateKeyExc])
private def asyncInsert(kv: (String, Any)*): ActorbaseCollection = {
val futureList = Future.traverse(kv)(keyVal =>
Future {
(keyVal._1 -> keyVal._2 -> requestBuilder
.withCredentials(conn.username, conn.password)
.withUrl(uri + "/collections/" + collectionName + "/" + keyVal._1)
.withBody(serialize(keyVal._2))
.addHeaders("owner" -> toBase64FromString(owner))
.withMethod(POST).send())
})
val listOfFutures = futureList.map { x =>
x map { response =>
response._2.statusCode match {
case Unauthorized | Forbidden => throw WrongCredentialsExc("Credentials privilege level does not meet criteria needed to perform this operation")
case Error => throw InternalErrorExc("There was an internal server error, something wrong happened")
case BadRequest => throw InternalErrorExc("Invalid or malformed request")
case OK =>
response._2.body map { x =>
x.asInstanceOf[String] match {
case "UndefinedCollection" => throw UndefinedCollectionExc("Undefined collection")
case "DuplicatedKey" => throw DuplicateKeyExc("Inserting duplicate key")
case "NoPrivileges" => throw WrongCredentialsExc("Insufficient permissions")
case "InvalidChar" => throw InternalErrorExc("Invalid or malformed request")
case _ => data += (response._1._1 -> response._1._2)
}
}
case _ =>
}
}
}
Await.result(listOfFutures, Duration.Inf)
ActorbaseCollection(owner, collectionName, contributors, data)
}
/**
* Insert a new key-value tuple, representing an ActorbaseObject to the
* collection reflecting local changes to remote collection on server-side
*
* @param kv an ActorbaseObject parameter representing a key/value pair
* @return an object of type ActorbaseCollection representing the collection updated
*/
def insert[A >: Any](kv: ActorbaseObject[A]): ActorbaseCollection = this.insert(kv.toSeq:_*)
/**
* Update one or more key-value of the collection, reflecting changes
* directly on remote system
*
* @param kv a vararg of Tuple2[String, Any] representing key-value pairs to be updated in the system
* @return an object of ActorbaseCollection containing the updated keys
   * @throws WrongCredentialsExc in case of wrong username or password, or non-existent ones
* @throws InternalErrorExc in case of internal server error
* @throws UndefinedCollectionExc in case of undefined collection
*/
@throws(classOf[WrongCredentialsExc])
@throws(classOf[InternalErrorExc])
@throws(classOf[UndefinedCollectionExc])
def update(kv: (String, Any)*): ActorbaseCollection = {
if(kv.length > 1)
asyncUpdate(kv:_*)
else {
for ((k, v) <- kv) {
val response = requestBuilder
.withCredentials(conn.username, conn.password)
.withUrl(uri + "/collections/" + collectionName + "/" + k)
.withBody(serialize(v))
.addHeaders("owner" -> toBase64FromString(owner))
.withMethod(PUT).send()
response.statusCode match {
case Unauthorized | Forbidden => throw WrongCredentialsExc("Credentials privilege level does not meet criteria needed to perform this operation")
case Error => throw InternalErrorExc("There was an internal server error, something wrong happened")
case OK =>
response.body map { x =>
x.asInstanceOf[String] match {
case "UndefinedCollection" => throw UndefinedCollectionExc("Undefined collection")
case "NoPrivileges" => throw WrongCredentialsExc("Insufficient permissions")
case "InvalidChar" => throw InternalErrorExc("Invalid or malformed request")
case _ =>
data -= k
data += (k -> v)
}
}
case _ =>
}
}
ActorbaseCollection(owner, collectionName, contributors, data)
}
}
/**
   * Service method: performs the updates asynchronously when more than one key-value pair is given
*
* @param kv a vararg of Tuple2[String, Any] representing key-value pairs to be updated in the system
* @return an object of ActorbaseCollection containing the updated keys
   * @throws WrongCredentialsExc in case of wrong username or password, or non-existent ones
* @throws InternalErrorExc in case of internal server error
* @throws UndefinedCollectionExc in case of undefined collection
*/
@throws(classOf[WrongCredentialsExc])
@throws(classOf[InternalErrorExc])
@throws(classOf[UndefinedCollectionExc])
private def asyncUpdate(kv: (String, Any)*): ActorbaseCollection = {
val futureList = Future.traverse(kv)(keyVal =>
Future {
(keyVal._1 -> keyVal._2 -> requestBuilder
.withCredentials(conn.username, conn.password)
.withUrl(uri + "/collections/" + collectionName + "/" + keyVal._1)
.withBody(serialize(keyVal._2))
.addHeaders("owner" -> toBase64FromString(owner))
.withMethod(PUT).send())
})
val listOfFutures = futureList.map { x =>
x map { response =>
response._2.statusCode match {
case Unauthorized | Forbidden => throw WrongCredentialsExc("Credentials privilege level does not meet criteria needed to perform this operation")
case Error => throw InternalErrorExc("There was an internal server error, something wrong happened")
case BadRequest => throw InternalErrorExc("Invalid or malformed request")
case OK =>
response._2.body map { x =>
x.asInstanceOf[String] match {
case "UndefinedCollection" => throw UndefinedCollectionExc("Undefined collection")
case "DuplicatedKey" => throw DuplicateKeyExc("Inserting duplicate key")
case "NoPrivileges" => throw WrongCredentialsExc("Insufficient permissions")
case "InvalidChar" => throw InternalErrorExc("Invalid or malformed request")
case _ =>
data -= response._1._1
data += (response._1._1 -> response._1._2)
}
}
case _ =>
}
}
}
Await.result(listOfFutures, Duration.Inf)
ActorbaseCollection(owner, collectionName, contributors, data)
}
/**
   * Remove an arbitrary number of key-value tuples from the collection,
   * reflecting local changes to the remote collection on the server side
*
* @param keys a vararg String representing a sequence of keys to be removed
* from the collection
* @return an object of ActorbaseCollection containing the updated keys
   * @throws WrongCredentialsExc in case of wrong username or password, or non-existent ones
* @throws InternalErrorExc in case of internal server error
* @throws UndefinedCollectionExc in case of undefined collection
*/
@throws(classOf[WrongCredentialsExc])
@throws(classOf[InternalErrorExc])
@throws(classOf[UndefinedCollectionExc])
def remove(keys: String*): ActorbaseCollection = {
if(keys.length > 1)
asyncRemove(keys:_*)
else {
keys.foreach { key =>
val response = requestBuilder withCredentials(conn.username, conn.password) withUrl uri + "/collections/" + collectionName + "/" + key withMethod DELETE send()
response.statusCode match {
case Unauthorized | Forbidden => throw WrongCredentialsExc("Credentials privilege level does not meet criteria needed to perform this operation")
case Error => throw InternalErrorExc("There was an internal server error, something wrong happened")
case OK =>
response.body map { x =>
x.asInstanceOf[String] match {
case "UndefinedCollection" => throw UndefinedCollectionExc("Undefined collection")
case "NoPrivileges" => throw WrongCredentialsExc("Insufficient permissions")
case _ => data -= key
}
}
case _ =>
}
}
ActorbaseCollection(owner, collectionName, contributors, data)
}
}
/**
   * Service method: performs the removals asynchronously when more than one key is given
*
* @param keys a vararg String representing a sequence of keys to be removed
* from the collection
* @return an object of ActorbaseCollection containing the updated keys
   * @throws WrongCredentialsExc in case of wrong username or password, or non-existent ones
* @throws InternalErrorExc in case of internal server error
* @throws UndefinedCollectionExc in case of undefined collection
*/
@throws(classOf[WrongCredentialsExc])
@throws(classOf[InternalErrorExc])
@throws(classOf[UndefinedCollectionExc])
private def asyncRemove(keys: String*): ActorbaseCollection = {
val futureList = Future.traverse(keys)(key =>
Future {
(key -> requestBuilder.withCredentials(conn.username, conn.password).withUrl(uri + "/collections/" + collectionName + "/" + key).withMethod(DELETE).send())
})
val listOfFutures = futureList.map { x =>
x map { response =>
response._2.statusCode match {
case Unauthorized | Forbidden => throw WrongCredentialsExc("Credentials privilege level does not meet criteria needed to perform this operation")
case Error => throw InternalErrorExc("There was an internal server error, something wrong happened")
case OK =>
response._2.body map { x =>
x.asInstanceOf[String] match {
case "UndefinedCollection" => throw UndefinedCollectionExc("Undefined collection")
case "NoPrivileges" => throw WrongCredentialsExc("Insufficient permissions")
case _ => data -= response._1
}
}
case _ =>
}
}
}
Await.result(listOfFutures, Duration.Inf)
ActorbaseCollection(owner, collectionName, contributors, data)
}
/**
* Remove a key-value tuple, representing an ActorbaseObject from the
* collection reflecting local changes to remote collection on server-side
*
* @param kv an ActorbaseObject parameter representing a key/value pair
* @return an object of ActorbaseCollection containing the updated keys
   * @throws WrongCredentialsExc in case of wrong username or password, or non-existent ones
* @throws InternalErrorExc in case of internal server error
*/
@throws(classOf[WrongCredentialsExc])
@throws(classOf[InternalErrorExc])
def remove[A >: Any](kv: ActorbaseObject[A]): ActorbaseCollection = this.remove(kv.keys.toSeq:_*)
/**
* Return all the contents of the collection in an ActorbaseObject
*
* @return an object of type ActorbaseObject
*/
def find[A >: Any]: ActorbaseObject[A] = find()
/**
* Find an arbitrary number of elements inside the collection, returning an
   * ActorbaseObject; returns all the contents of the collection if the vararg
   * passed as parameter is empty
*
* @param keys a vararg String representing a sequence of keys to be retrieved
* @return an object of type ActorbaseObject
* @throws WrongCredentialsExc in case of credentials not valid
* @throws InternalErrorExc in case of internal error on the server side
*/
@throws(classOf[WrongCredentialsExc])
@throws(classOf[InternalErrorExc])
def find[A >: Any](keys: String*): ActorbaseObject[A] = {
if(keys.length == 0) ActorbaseObject(data)
else if(keys.length == 1) {
var buffer = TreeMap[String, Any]().empty
keys.foreach { key =>
if (data.contains(key))
data get key map (k => buffer += (key -> k))
else {
val response = requestBuilder
.withCredentials(conn.username, conn.password)
.withUrl(uri + "/collections/" + collectionName + "/" + key)
.addHeaders("owner" -> toBase64FromString(owner))
.withMethod(GET).send()
response.statusCode match {
case Unauthorized | Forbidden => throw WrongCredentialsExc("Credentials privilege level does not meet criteria needed to perform this operation")
case BadRequest => throw InternalErrorExc("Invalid or malformed request")
case Error => throw InternalErrorExc("There was an internal server error, something wrong happened")
case OK =>
response.body map { content =>
val ret = parse(content).extract[SingleResponse]
buffer += (key -> ret.response)
}
case _ =>
}
}
}
data ++= buffer
ActorbaseObject(buffer.toMap)
} else asyncFind(keys:_*)
}
/**
   * Service method: performs the lookups asynchronously when more than one key is given
*
* @param keys a vararg String representing a sequence of keys to be retrieved
* @return an object of type ActorbaseObject
* @throws WrongCredentialsExc in case of credentials not valid
* @throws InternalErrorExc in case of internal error on the server side
*/
@throws(classOf[WrongCredentialsExc])
@throws(classOf[InternalErrorExc])
private def asyncFind[A >: Any](keys: String*): ActorbaseObject[A] = {
var buffer = TreeMap.empty[String, Any]
val futureList = Future.traverse(keys)(key =>
Future {
(key -> requestBuilder
.withCredentials(conn.username, conn.password)
.withUrl(uri + "/collections/" + collectionName + "/" + key)
.addHeaders("owner" -> toBase64FromString(owner))
.withMethod(GET).send())
})
val listOfFutures = futureList.map { x =>
x map { response =>
response._2.statusCode match {
case Unauthorized | Forbidden => throw WrongCredentialsExc("Credentials privilege level does not meet criteria needed to perform this operation")
case BadRequest => throw InternalErrorExc("Invalid or malformed request")
case Error => throw InternalErrorExc("There was an internal server error, something wrong happened")
case OK =>
response._2.body map { content =>
val ret = parse(content).extract[SingleResponse]
buffer += (response._1 -> ret.response)
}
case _ =>
}
}
}
Await.result(listOfFutures, Duration.Inf)
data ++= buffer
ActorbaseObject(buffer.toMap)
}
/**
* Find an element inside the collection, returning an ActorbaseObject
* representing the key/value pair
*
* @param key a String representing the key associated to the value to be retrieved
* @return an object of type ActorbaseObject
*/
def findOne[A >: Any](key: String): Option[ActorbaseObject[A]] = {
if (data.contains(key))
Some(ActorbaseObject(key -> data.get(key).getOrElse(None)))
else {
var buffer: Option[ActorbaseObject[A]] = None
val response = requestBuilder
.withCredentials(conn.username, conn.password)
.withUrl(uri + "/collections/" + collectionName + "/" + key)
.addHeaders("owner" -> toBase64FromString(owner))
.withMethod(GET).send()
response.statusCode match {
case Unauthorized | Forbidden => throw WrongCredentialsExc("Credentials privilege level does not meet criteria needed to perform this operation")
case BadRequest => throw InternalErrorExc("Invalid or malformed request")
case Error => throw InternalErrorExc("There was an internal server error, something wrong happened")
case OK =>
response.body map { content =>
val ret = parse(content).extract[SingleResponse]
buffer = Some(ActorbaseObject(key -> ret.response))
}
case _ =>
}
buffer
}
}
/**
* Add a contributor to the collection, updating the remote system
* counterpart
*
* @param username a String representing the username of the user
* @param write a Boolean flag representing the permissions of the contributor
* @return no return value
   * @throws WrongCredentialsExc in case of wrong username or password, or non-existent ones
* @throws InternalErrorExc in case of internal server error
* @throws UndefinedUsernameExc in case of username not found on the remote system
* @throws UsernameAlreadyExistsExc in case of username already present inside the contributors list
*/
@throws(classOf[WrongCredentialsExc])
@throws(classOf[InternalErrorExc])
@throws(classOf[UndefinedUsernameExc])
@throws(classOf[UsernameAlreadyExistsExc])
def addContributor(username: String, write: Boolean = false): Unit = {
val permission = if (!write) "read" else "readwrite"
val response = requestBuilder
.withCredentials(conn.username, conn.password)
.withUrl(uri + "/contributors/" + collectionName)
.withBody(toBase64(username.getBytes("UTF-8")))
.addHeaders("permission" -> toBase64FromString(permission), "owner" -> toBase64FromString(conn.username))
.withMethod(POST).send()
response.statusCode match {
case Unauthorized | Forbidden => throw WrongCredentialsExc("Credentials privilege level does not meet criteria needed to perform this operation")
case Error => throw InternalErrorExc("There was an internal server error, something wrong happened")
case OK =>
response.body map { r =>
r.asInstanceOf[String] match {
case "UndefinedUsername" => throw UndefinedUsernameExc("Undefined username: Actorbase does not contains such credential")
case "UsernameAlreadyExists" => throw UsernameAlreadyExistsExc("Username already in contributors for the given collection")
case "NoPrivileges" => throw WrongCredentialsExc("Insufficient permissions")
case "OK" => if (!contributors.contains(username)) contributors += (username -> write)
}
}
}
}
/**
* Remove a contributor from the collection, updating the remote system
* counterpart
*
* @param username a String representing the username of the user
* @return no return value
   * @throws WrongCredentialsExc in case of wrong username or password, or non-existent ones
   * @throws InternalErrorExc in case of internal server error
   * @throws UndefinedUsernameExc in case of username not found on the remote system
*/
@throws(classOf[WrongCredentialsExc])
@throws(classOf[InternalErrorExc])
def removeContributor(username: String): Unit = {
val response = requestBuilder
.withCredentials(conn.username, conn.password)
.withUrl(uri + "/contributors/" + collectionName)
.addHeaders("owner" -> toBase64FromString(conn.username))
.withBody(toBase64(username.getBytes("UTF-8")))
.withMethod(DELETE).send()
response.statusCode match {
case Unauthorized | Forbidden => throw WrongCredentialsExc("Credentials privilege level does not meet criteria needed to perform this operation")
case Error => throw InternalErrorExc("There was an internal server error, something wrong happened")
case OK =>
response.body map { r =>
r.asInstanceOf[String] match {
case "UndefinedUsername" => throw UndefinedUsernameExc("Undefined username: Actorbase does not contains such credential")
case "NoPrivileges" => throw WrongCredentialsExc("Insufficient permissions")
case "OK" => if (contributors contains username) contributors -= username
}
}
}
}
/**
* Drop the entire collection, reflecting the local change to remote on
* server-side
*
   * @throws WrongCredentialsExc in case of wrong username or password, or non-existent ones
* @throws InternalErrorExc in case of internal server error
*/
@throws(classOf[WrongCredentialsExc])
@throws(classOf[InternalErrorExc])
def drop: Unit = {
data = data.empty
val response = requestBuilder withCredentials(conn.username, conn.password) withUrl uri + "/collections/" + collectionName withMethod DELETE send()
response.statusCode match {
case Unauthorized | Forbidden => throw WrongCredentialsExc("Credentials privilege level does not meet criteria needed to perform this operation")
case Error => throw InternalErrorExc("There was an internal server error, something wrong happened")
case _ =>
}
}
/**
* Count the number of items inside this collection
*
* @return an Int, represents the number of key-value pair
*/
def count: Int = data.size
/**
   * Export the content of the current collection to a given path on
* the filesystem, JSON formatted
*
* @param path a String representing the path on the filesystem where
* the JSON file will be saved
* @param append if false this method will overwrite the file that's already in the given path,
* if true it will append the exported collection to the end of the file
* @return no return value
*/
def export(path: String, append: Boolean = false): Unit = {
val exportTo = new File(path)
if (!exportTo.exists)
try{
exportTo.getParentFile.mkdirs
} catch {
case np: NullPointerException =>
}
// printWriter.write(serialize2JSON(this))
if(!append){ //if append is false it overwrites everything on the file
val printWriter = new PrintWriter(exportTo)
printWriter.write(toString)
printWriter.close
}
else{ //append==true, it adds the collection to the end of the file
val printWriter = new PrintWriter(new FileOutputStream(exportTo, true))
printWriter.append(",\n")
printWriter.append(toString)
printWriter.close
}
}
/**
* Foreach method, applies a function f to all elements of this map.
*
* @param f the function that is applied for its side-effect to every element.
* The result of function f is discarded.
* @return no return value
*/
def foreach(f: ((String, Any)) => Unit): Unit = data.foreach(f)
/**
* Creates a non-strict filter of this traversable collection.
*
* @param p the predicate used to test elements.
* @return an object of class WithFilter, which supports map, flatMap, foreach,
* and withFilter operations. All these operations apply to those elements of
* this traversable collection which satisfy the predicate p.
*/
def withFilter(p: ((String, Any)) => Boolean): FilterMonadic[(String, Any), TreeMap[String, Any]] = data.withFilter(p)
/**
* Converts this collection to a string.
*
   * @return a string representation of this collection. By default this string
   * consists of a JSON containing the collection name, the owner, the contributors and the items
*/
override def toString: String = {
var headers = new TreeMap[String, Any]()
headers += ("collectionName" -> collectionName)
headers += ("owner" -> owner)
headers += ("contributors" -> contributors)
headers += ("data" -> data)
toJSON(headers)
}
}
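// --- Usage sketch (editor's addition; hypothetical wiring) ---
// Assumes an already-established driver Connection and URI scheme (normally obtained via
// ActorbaseDriver, which is not shown in this file); only methods defined above are used.
object ActorbaseCollectionSketch {
  import com.actorbase.driver.ActorbaseDriver.Connection

  def demo(implicit conn: Connection, scheme: String): Unit = {
    // Create a local handle and push two items to the remote collection
    val coll = ActorbaseCollection("admin", "books").insert("k1" -> 42, "k2" -> "value")
    // find() fetches any keys not already cached locally and returns an ActorbaseObject
    val found = coll.find("k1", "k2")
    println(found)
    // foreach/withFilter make the collection usable in for-comprehensions
    for ((k, v) <- coll if k != "k2") println(s"$k -> $v")
    // Remove a key both remotely and from the local snapshot
    coll.remove("k2")
    // Dump the whole collection as JSON to disk
    coll.export("/tmp/books.json")
  }
}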
|
ScalateKids/Actorbase-Client
|
src/main/scala/com/actorbase/driver/data/ActorbaseCollection.scala
|
Scala
|
mit
| 29,038 |
package com.aristocrat.mandrill.requests.Templates
import com.aristocrat.mandrill.requests.MandrillRequest
case class List(key: String, label: String) extends MandrillRequest
|
aristocratic/mandrill
|
src/main/scala/com/aristocrat/mandrill/requests/Templates/List.scala
|
Scala
|
mit
| 177 |
package scribe.json
import fabric.parse.Json
import scribe.LogRecord
import scribe.output.{LogOutput, TextOutput}
import scribe.output.format.OutputFormat
import scribe.writer.Writer
import perfolation._
import fabric.rw._
import fabric._
import scribe.message.Message
case class JsonWriter(writer: Writer, compact: Boolean = true) extends Writer {
override def write[M](record: LogRecord[M], output: LogOutput, outputFormat: OutputFormat): Unit = {
val l = record.timeStamp
val traces = record.additionalMessages.collect {
case message: Message[_] if message.value.isInstanceOf[Throwable] => throwable2Trace(message.value.asInstanceOf[Throwable])
}
val additionalMessages = record.additionalMessages.map(_.logOutput.plainText)
val r = Record(
level = record.level.name,
levelValue = record.levelValue,
message = record.logOutput.plainText,
additionalMessages = additionalMessages,
fileName = record.fileName,
className = record.className,
methodName = record.methodName,
line = record.line,
column = record.column,
data = record.data.map {
case (key, value) => value() match {
case value: Value => key -> value
case any => key -> str(any.toString)
}
},
traces = traces,
timeStamp = l,
date = l.t.F,
time = s"${l.t.T}.${l.t.L}${l.t.z}"
)
val json = r.toValue
val jsonString = if (compact) {
fabric.parse.JsonWriter.Compact(json)
} else {
fabric.parse.JsonWriter.Default(json)
}
writer.write(record, new TextOutput(jsonString), outputFormat)
}
private def throwable2Trace(throwable: Throwable): Trace = {
val elements = throwable.getStackTrace.toList.map { e =>
TraceElement(e.getClassName, e.getMethodName, e.getLineNumber)
}
Trace(throwable.getLocalizedMessage, elements, Option(throwable.getCause).map(throwable2Trace))
}
}
case class Record(level: String,
levelValue: Double,
message: String,
additionalMessages: List[String],
fileName: String,
className: String,
methodName: Option[String],
line: Option[Int],
column: Option[Int],
data: Map[String, Value],
traces: List[Trace],
timeStamp: Long,
date: String,
time: String)
object Record {
implicit val mapRW: ReaderWriter[Map[String, Value]] = ReaderWriter[Map[String, Value]](identity, _.asObj.value)
implicit val rw: ReaderWriter[Record] = ccRW
}
case class Trace(message: String, elements: List[TraceElement], cause: Option[Trace])
object Trace {
implicit val rw: ReaderWriter[Trace] = ccRW
}
case class TraceElement(`class`: String, method: String, line: Int)
object TraceElement {
implicit val rw: ReaderWriter[TraceElement] = ccRW
}
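// --- Wiring sketch (editor's addition; hedged) ---
// Assumes scribe 3.x, where Logger.withHandler accepts a writer argument: every record
// logged through the root logger is then emitted as a single JSON line on the console.
object JsonLoggingSketch {
  import scribe.Logger
  import scribe.writer.ConsoleWriter

  def main(args: Array[String]): Unit = {
    Logger.root.clearHandlers().withHandler(writer = JsonWriter(ConsoleWriter)).replace()
    scribe.info("hello in JSON")
  }
}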
|
outr/scribe
|
json/shared/src/main/scala/scribe/json/JsonWriter.scala
|
Scala
|
mit
| 2,957 |
object Wrap1 {
class E
object E {
final val A = E()
val $values = Array(A)
}
}
object Wrap2 {
class E
object E {
final val A = E()
val ref = A
}
}
|
dotty-staging/dotty
|
tests/init/pos/i9664.scala
|
Scala
|
apache-2.0
| 175 |
package dbpedia.dataparsers.util.wikiparser
/**
* Represents plain text.
*
* @param text The text
* @param line The source line number where this text begins
*/
case class TextNode(text : String, override val line : Int) extends Node(List.empty, line)
{
def toWikiText = text
def toPlainText = text
/**
* Returns the text denoted by this node.
*/
override protected def retrieveText(recurse: Boolean) = Some(text)
}
|
FnOio/dbpedia-parsing-functions-scala
|
src/main/scala/dbpedia/dataparsers/util/wikiparser/TextNode.scala
|
Scala
|
gpl-2.0
| 458 |
package common.las
import math._
abstract class Method(L: Array[Array[Double]], r: Array[Double]) {
val fmt = "%10.6f"
def matrixRepr(a: Array[Array[Double]]): String = {
val s = StringBuilder.newBuilder
for (row <- a) s ++= row map (el => fmt.format(el)) mkString("(", " ", ")\\n")
s.result
}
def vectorRepr(v: Array[Double]): String = {
val s = StringBuilder.newBuilder
for (el <- v) s ++= "(" + fmt.format(el) + ")\\n"
s.result
}
def rowRepr(v: Array[Double]): String = {
v map { el => fmt.format(el) } mkString("(", " ", ")")
}
val A = L map (_.clone)
val dim = A.length
val f = r.clone
val x = Array.ofDim[Double](dim)
def X = rowRepr(x)
def lhs = println(matrixRepr(L))
def rhs = println(vectorRepr(r))
}
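// --- Usage sketch (editor's addition): Method has no abstract members, so an anonymous
// subclass is enough to exercise its formatting helpers on a small 2x2 system. ---
object MethodSketch {
  def main(args: Array[String]): Unit = {
    val a = Array(Array(2.0, 1.0), Array(1.0, 3.0))
    val b = Array(5.0, 10.0)
    val m = new Method(a, b) {}
    m.lhs          // pretty-prints the coefficient matrix
    m.rhs          // pretty-prints the right-hand side vector
    println(m.X)   // the solution vector, still all zeros here
  }
}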
|
dnoskov/cm
|
src/main/scala/common/las/Method.scala
|
Scala
|
mit
| 778 |
package warsztat
import java.io.OutputStream
import scala.util.Try
object RichByteArray {
implicit class RichByteArray(ba: Array[Byte]) {
def render(prefix: String): String = {
prefix + new String(ba)
}
    // Write the wrapped bytes to the given stream, capturing any IOException in a Failure
    def writeTo(out: OutputStream): Try[Unit] = Try(out.write(ba))
}
}
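// --- Usage sketch (editor's addition) for the enrichment above ---
object RichByteArraySketch {
  import java.io.ByteArrayOutputStream
  import warsztat.RichByteArray._

  def main(args: Array[String]): Unit = {
    val bytes = "hello".getBytes("UTF-8")
    println(bytes.render("prefix: "))     // prefix: hello
    val out = new ByteArrayOutputStream()
    println(bytes.writeTo(out).isSuccess) // true
    println(out.toString("UTF-8"))        // hello
  }
}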
|
bjankie1/warsztat-scala
|
src/main/scala/warsztat/RichByteArray.scala
|
Scala
|
apache-2.0
| 283 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import java.{util => ju}
import org.apache.spark.SparkException
import org.apache.spark.annotation.Since
import org.apache.spark.ml.Model
import org.apache.spark.ml.attribute.NominalAttribute
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared.{HasInputCol, HasOutputCol}
import org.apache.spark.ml.util._
import org.apache.spark.sql._
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.{DoubleType, StructField, StructType}
/**
* `Bucketizer` maps a column of continuous features to a column of feature buckets.
*/
@Since("1.4.0")
final class Bucketizer @Since("1.4.0") (@Since("1.4.0") override val uid: String)
extends Model[Bucketizer] with HasInputCol with HasOutputCol with DefaultParamsWritable {
@Since("1.4.0")
def this() = this(Identifiable.randomUID("bucketizer"))
/**
* Parameter for mapping continuous features into buckets. With n+1 splits, there are n buckets.
* A bucket defined by splits x,y holds values in the range [x,y) except the last bucket, which
* also includes y. Splits should be of length greater than or equal to 3 and strictly increasing.
* Values at -inf, inf must be explicitly provided to cover all Double values;
* otherwise, values outside the splits specified will be treated as errors.
*
* See also [[handleInvalid]], which can optionally create an additional bucket for NaN values.
*
* @group param
*/
@Since("1.4.0")
val splits: DoubleArrayParam = new DoubleArrayParam(this, "splits",
"Split points for mapping continuous features into buckets. With n+1 splits, there are n " +
"buckets. A bucket defined by splits x,y holds values in the range [x,y) except the last " +
"bucket, which also includes y. The splits should be of length >= 3 and strictly " +
"increasing. Values at -inf, inf must be explicitly provided to cover all Double values; " +
"otherwise, values outside the splits specified will be treated as errors.",
Bucketizer.checkSplits)
/** @group getParam */
@Since("1.4.0")
def getSplits: Array[Double] = $(splits)
/** @group setParam */
@Since("1.4.0")
def setSplits(value: Array[Double]): this.type = set(splits, value)
/** @group setParam */
@Since("1.4.0")
def setInputCol(value: String): this.type = set(inputCol, value)
/** @group setParam */
@Since("1.4.0")
def setOutputCol(value: String): this.type = set(outputCol, value)
/**
* Param for how to handle invalid entries. Options are 'skip' (filter out rows with
* invalid values), 'error' (throw an error), or 'keep' (keep invalid values in a special
* additional bucket).
* Default: "error"
* @group param
*/
// TODO: SPARK-18619 Make Bucketizer inherit from HasHandleInvalid.
@Since("2.1.0")
val handleInvalid: Param[String] = new Param[String](this, "handleInvalid", "how to handle " +
"invalid entries. Options are skip (filter out rows with invalid values), " +
"error (throw an error), or keep (keep invalid values in a special additional bucket).",
ParamValidators.inArray(Bucketizer.supportedHandleInvalids))
/** @group getParam */
@Since("2.1.0")
def getHandleInvalid: String = $(handleInvalid)
/** @group setParam */
@Since("2.1.0")
def setHandleInvalid(value: String): this.type = set(handleInvalid, value)
setDefault(handleInvalid, Bucketizer.ERROR_INVALID)
@Since("2.0.0")
override def transform(dataset: Dataset[_]): DataFrame = {
transformSchema(dataset.schema)
val (filteredDataset, keepInvalid) = {
if (getHandleInvalid == Bucketizer.SKIP_INVALID) {
// "skip" NaN option is set, will filter out NaN values in the dataset
(dataset.na.drop().toDF(), false)
} else {
(dataset.toDF(), getHandleInvalid == Bucketizer.KEEP_INVALID)
}
}
val bucketizer: UserDefinedFunction = udf { (feature: Double) =>
Bucketizer.binarySearchForBuckets($(splits), feature, keepInvalid)
}
val newCol = bucketizer(filteredDataset($(inputCol)).cast(DoubleType))
val newField = prepOutputField(filteredDataset.schema)
filteredDataset.withColumn($(outputCol), newCol, newField.metadata)
}
private def prepOutputField(schema: StructType): StructField = {
val buckets = $(splits).sliding(2).map(bucket => bucket.mkString(", ")).toArray
val attr = new NominalAttribute(name = Some($(outputCol)), isOrdinal = Some(true),
values = Some(buckets))
attr.toStructField()
}
@Since("1.4.0")
override def transformSchema(schema: StructType): StructType = {
SchemaUtils.checkNumericType(schema, $(inputCol))
SchemaUtils.appendColumn(schema, prepOutputField(schema))
}
@Since("1.4.1")
override def copy(extra: ParamMap): Bucketizer = {
defaultCopy[Bucketizer](extra).setParent(parent)
}
}
@Since("1.6.0")
object Bucketizer extends DefaultParamsReadable[Bucketizer] {
private[feature] val SKIP_INVALID: String = "skip"
private[feature] val ERROR_INVALID: String = "error"
private[feature] val KEEP_INVALID: String = "keep"
private[feature] val supportedHandleInvalids: Array[String] =
Array(SKIP_INVALID, ERROR_INVALID, KEEP_INVALID)
/**
* We require splits to be of length >= 3 and to be in strictly increasing order.
* No NaN split should be accepted.
*/
private[feature] def checkSplits(splits: Array[Double]): Boolean = {
if (splits.length < 3) {
false
} else {
var i = 0
val n = splits.length - 1
while (i < n) {
if (splits(i) >= splits(i + 1) || splits(i).isNaN) return false
i += 1
}
!splits(n).isNaN
}
}
/**
* Binary searching in several buckets to place each data point.
* @param splits array of split points
* @param feature data point
* @param keepInvalid NaN flag.
* Set "true" to make an extra bucket for NaN values;
* Set "false" to report an error for NaN values
* @return bucket for each data point
* @throws SparkException if a feature is < splits.head or > splits.last
*/
private[feature] def binarySearchForBuckets(
splits: Array[Double],
feature: Double,
keepInvalid: Boolean): Double = {
if (feature.isNaN) {
if (keepInvalid) {
splits.length - 1
} else {
throw new SparkException("Bucketizer encountered NaN value. To handle or skip NaNs," +
" try setting Bucketizer.handleInvalid.")
}
} else if (feature == splits.last) {
splits.length - 2
} else {
val idx = ju.Arrays.binarySearch(splits, feature)
if (idx >= 0) {
idx
} else {
val insertPos = -idx - 1
if (insertPos == 0 || insertPos == splits.length) {
throw new SparkException(s"Feature value $feature out of Bucketizer bounds" +
s" [${splits.head}, ${splits.last}]. Check your features, or loosen " +
s"the lower/upper bound constraints.")
} else {
insertPos - 1
}
}
}
}
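  // Worked example (comment added for illustration; the split values below are invented):
  //   splits = Array(Double.NegativeInfinity, 0.0, 10.0, Double.PositiveInfinity) defines three buckets, and
  //   binarySearchForBuckets(splits, 5.0, keepInvalid = false)       == 1.0  // 5.0 falls in [0.0, 10.0)
  //   binarySearchForBuckets(splits, 10.0, keepInvalid = false)      == 2.0  // boundary values land in the upper bucket
  //   binarySearchForBuckets(splits, Double.NaN, keepInvalid = true) == 3.0  // extra bucket for NaN when keepInvalid is set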
@Since("1.6.0")
override def load(path: String): Bucketizer = super.load(path)
}
|
wangyixiaohuihui/spark2-annotation
|
mllib/src/main/scala/org/apache/spark/ml/feature/Bucketizer.scala
|
Scala
|
apache-2.0
| 8,024 |
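A minimal usage sketch for the Bucketizer defined above. The SparkSession setup, column names and split values are assumptions made for illustration, not taken from the file:

import org.apache.spark.ml.feature.Bucketizer
import org.apache.spark.sql.SparkSession

object BucketizerExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("BucketizerExample").master("local[*]").getOrCreate()
    import spark.implicits._

    // Four splits define three buckets: [-inf, 0.0), [0.0, 1.0), [1.0, +inf]
    val splits = Array(Double.NegativeInfinity, 0.0, 1.0, Double.PositiveInfinity)
    val data = Seq(-0.5, 0.3, 1.5, 0.0).toDF("features")

    val bucketizer = new Bucketizer()
      .setInputCol("features")
      .setOutputCol("bucketedFeatures")
      .setSplits(splits)

    // Expected bucket indices: -0.5 -> 0.0, 0.3 -> 1.0, 1.5 -> 2.0, 0.0 -> 1.0
    bucketizer.transform(data).show()
    spark.stop()
  }
}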
trait Equalizer[T]
trait Gen[A]
class Broken {
implicit def const[T](x: T): Gen[T] = ???
implicit def convertToEqualizer[T](left: T): Equalizer[T] = ???
def in(a: Any) = ()
in {
import scala.None // any import will do..
"" == "" // this no longer triggers the bug, as Object#== now overrides Any#==
}
// We can still trigger the bug with a structural type, see pending/neg/t8219.scala
}
|
lampepfl/dotty
|
tests/pos/t8219.scala
|
Scala
|
apache-2.0
| 410 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asual.summer.sample.web
import com.asual.summer.core._
import com.asual.summer.core.view._
import com.asual.summer.json.JsonView
import com.asual.summer.xml.XmlView
import com.asual.summer.sample.domain._
import com.asual.summer.sample.domain.Technology.Image
import java.util.Arrays
import javax.servlet.http.HttpServletResponse
import javax.validation.Valid
import org.springframework.stereotype.Controller
import org.springframework.ui.ModelMap
import org.springframework.web.bind.annotation._
import org.springframework.web.servlet._
import org.springframework.web.servlet.view.RedirectView
/**
*
* @author Rostislav Hristov
*
*/
@Controller
@RequestMapping(Array("/technology"))
class TechnologyController {
@RequestMapping(method=Array(RequestMethod.GET))
@ResponseViews(Array(classOf[AbstractResponseView]))
def list:ModelAndView = {
return new ModelAndView("/list", new ModelMap(Technology.list))
}
@RequestMapping(method=Array(RequestMethod.POST))
def persist(@Valid @ModelAttribute technology:Technology):ModelAndView = {
technology.merge
return new ModelAndView(new RedirectView("/technology/" + technology.value, true))
}
@RequestMapping(value=Array("/{value}"), method=Array(RequestMethod.GET))
@ResponseViews(Array(classOf[JsonView], classOf[XmlView]))
def view(@PathVariable("value") value:String):ModelAndView = {
var technology = Technology.find(value)
if (technology == null) {
throw new ViewNotFoundException
}
return new ModelAndView("/view", new ModelMap(technology))
}
@RequestMapping(value=Array("/{value}"), method=Array(RequestMethod.PUT))
def merge(@Valid @ModelAttribute technology:Technology):ModelAndView = {
technology.merge
return new ModelAndView(new RedirectView("/technology/" + technology.value, true))
}
@RequestMapping(value=Array("/{value}"), method=Array(RequestMethod.DELETE))
def remove(@PathVariable("value") value:String):ModelAndView = {
var technology = Technology.find(value)
if (technology != null) {
technology.remove
}
return new ModelAndView(new RedirectView("/technology", true))
}
@RequestMapping(Array("/add"))
def add:ModelAndView = {
var model = new ModelMap
model.addAllAttributes(Arrays.asList(new Technology, License.list, Status.list))
return new ModelAndView("/add", model)
}
@RequestMapping(Array("/{value}/edit"))
def edit(@PathVariable("value") value:String):ModelAndView = {
var technology = Technology.find(value)
if (technology == null) {
throw new ViewNotFoundException
}
var model = new ModelMap
model.addAllAttributes(Arrays.asList(technology, License.list, Status.list))
return new ModelAndView("/edit", model)
}
@RequestMapping(Array("/image/{value}"))
def image(@PathVariable("value") value:String, response:HttpServletResponse) {
var image = Technology.findImage(value)
if (image != null) {
response.setContentLength(image.getBytes.length)
response.setContentType(image.contentType)
response.getOutputStream.write(image.getBytes)
} else {
response.setStatus(HttpServletResponse.SC_NOT_FOUND)
}
response.getOutputStream.flush
response.getOutputStream.close
}
}
|
asual/summer
|
samples/scala/src/main/scala/com/asual/summer/sample/web/TechnologyController.scala
|
Scala
|
apache-2.0
| 3,735 |
/*
* Cakemix
*
* Copyright Xebia BV and Wehkamp BV
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package cakemix
import akka.actor.{ Actor, Scheduler }
/**
* Mixin trait that provides an Akka Scheduler.
*/
trait SchedulerProvider {
implicit def scheduler: Scheduler
}
/**
* Implementation of [[cakemix.SchedulerProvider]] that uses the
* actor's context to provide an instance of Scheduler.
*/
trait SchedulerProviderForActors { this: Actor =>
def scheduler: Scheduler = context.system.scheduler
}
|
xebia/cakemix
|
src/main/scala/cakemix/SchedulerProvider.scala
|
Scala
|
mit
| 675 |
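A brief sketch of how SchedulerProviderForActors is meant to be mixed in; the actor, message and delay below are invented for illustration:

import scala.concurrent.duration._
import akka.actor.Actor
import cakemix.SchedulerProviderForActors

// Hypothetical actor that uses the provided scheduler instead of reaching
// for context.system.scheduler directly.
class ReminderActor extends Actor with SchedulerProviderForActors {
  import context.dispatcher // ExecutionContext required by scheduleOnce

  override def preStart(): Unit =
    scheduler.scheduleOnce(5.seconds, self, "remind")

  def receive: Receive = {
    case "remind" => println("time to act")
  }
}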
package io.iohk.ethereum.db.components
import io.iohk.ethereum.db.storage._
import io.iohk.ethereum.db.storage.pruning.PruningMode
import io.iohk.ethereum.domain.BlockchainStorages
trait StoragesComponent {
val storages: Storages
trait Storages extends BlockchainStorages {
val blockHeadersStorage: BlockHeadersStorage
val blockBodiesStorage: BlockBodiesStorage
val blockNumberMappingStorage: BlockNumberMappingStorage
val receiptStorage: ReceiptStorage
val nodeStorage: NodeStorage
val evmCodeStorage: EvmCodeStorage
val chainWeightStorage: ChainWeightStorage
val appStateStorage: AppStateStorage
val fastSyncStateStorage: FastSyncStateStorage
val transactionMappingStorage: TransactionMappingStorage
val knownNodesStorage: KnownNodesStorage
val pruningMode: PruningMode
val cachedNodeStorage: CachedNodeStorage
}
}
|
input-output-hk/etc-client
|
src/main/scala/io/iohk/ethereum/db/components/StoragesComponent.scala
|
Scala
|
mit
| 892 |
package clean.run.uti
import al.strategies.{RandomSampling, HTUFixo, DensityWeightedTrainingUtilityFixo}
import clean.lib.{CM, Ds}
import ml.classifiers._
import scala.util.Random
object plotaTUvsMarvsATU extends App with CM {
val context: String = ""
val ds = Ds("banana", readOnly = true)
ds.open()
val m = (1 to 1).par map { n =>
val (tr, ts) = new Random(n).shuffle(ds.patterns).splitAt(1000)
val e = KNNBatcha(5, "eucl", ds.patterns)
val tu = DensityWeightedTrainingUtilityFixo(tr, e, tr, "eucl").queries.take(1000)
val rnd = RandomSampling(tr).queries.take(1000)
val l = tu.zip(rnd)
List.fill(l.size)(l).zipWithIndex.map { case (x, i) =>
val (t, r) = x.take(i + 1).unzip
val (mt, mr) = e.build(t) -> e.build(r)
acc(mt.confusion(ts)) -> acc(mr.confusion(ts))
}
}
ds.close()
val r = m.transpose.map { l =>
val (a, b) = l.unzip
a.sum / a.size -> b.sum / b.size
}
r foreach { case (a, b) => println(s"$a $b") }
}
|
active-learning/active-learning-scala
|
src/main/scala/clean/run/uti/plotaTUvsMarvsATU.scala
|
Scala
|
gpl-2.0
| 992 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.item.HeldItem
import io.truthencode.ddo.model.item.WearableItem
import io.truthencode.ddo.{WearLocation, Wearable}
/**
* Includes Bucklers / and Towershields
*/
trait Shield extends WearableItem with Wearable {
/**
* A bitmask that corresponds to one or more [io.truthencode.ddo.WearLocation] values.
*/
override def allowedWearLocationFlags: Int = WearLocation.OffHand.bitValue
}
|
adarro/ddo-calc
|
subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/item/HeldItem/Shield.scala
|
Scala
|
apache-2.0
| 1,103 |
// These are meant to be typed into the REPL. You can also run
// scala -Xnojline < repl-session.scala to run them all at once.
8 * 5 + 2
0.5 * res0
"Hello, " + res0
res2.toUpperCase
|
nmt1994/Scala-Practica
|
src/week1/codes/ch01/sec01/repl-session.scala
|
Scala
|
mit
| 188 |
package edu.berkeley.cs.amplab.mlmatrix
import org.scalatest.FunSuite
import breeze.linalg._
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{SQLContext, DataFrame, Row}
import org.apache.spark.sql.types._
class RowPartitionedMatrixSuite extends FunSuite with LocalSparkContext with Logging {
test("reduceRowElements()") {
sc = new SparkContext("local", "test")
val testMat = RowPartitionedMatrix.fromArray(
sc.parallelize(Seq(
Array[Double](1, 2, 3),
Array[Double](1, 9, -1),
Array[Double](0, 0, 1),
Array[Double](0, 1, 0)
), 2), // row-major, laid out as is
Seq(2, 2),
3
)
val rowProducts = testMat.reduceRowElements(_ * _)
assert(rowProducts.collect().toArray === Array(6, -9, 0, 0),
"reduceRowElements() does not return correct answers!")
assert(rowProducts.numRows() === 4, "reduceRowElements() returns a result with incorrect row count!")
assert(rowProducts.numCols() === 1, "reduceRowElements() returns a result with incorrect col count!")
}
test("reduceColElements() and colSums()") {
sc = new SparkContext("local", "test")
val testMat = RowPartitionedMatrix.fromArray(
sc.parallelize(Seq(
Array[Double](1, 2, 3),
Array[Double](1, 9, -1),
Array[Double](1, 0, 1),
Array[Double](1618, 1, 4)
), 2), // row-major, laid out as is
Seq(2, 2),
3
)
val colProducts = testMat.reduceColElements(_ * _)
assert(colProducts.collect().toArray === Array(1618, 0, -12),
"reduceColElements() does not return correct answers!")
assert(colProducts.numRows() === 1, "reduceColElements() returns a result with incorrect row count!")
assert(colProducts.numCols() === 3, "reduceColElements() returns a result with incorrect col count!")
assert(testMat.colSums() === Seq(1621, 12, 7), "colSums() returns incorrect sums!")
}
test("rowSums()") {
sc = new SparkContext("local", "test")
val testMat = RowPartitionedMatrix.fromArray(
sc.parallelize(Seq(
Array[Double](1, 2, 3),
Array[Double](1, 9, -1),
Array[Double](0, 0, 1),
Array[Double](0, 1, 0)
), 4), // row-major, laid out as is
Seq(1, 1, 1, 1),
3
)
assert(testMat.rowSums() === Seq(6, 9, 1, 1), "rowSums() returns incorrect sums!")
}
test("slicing using various apply() methods") {
sc = new SparkContext("local", "test")
val testMat = RowPartitionedMatrix.fromArray(
sc.parallelize(Seq(
Array[Double](1, 2, 3),
Array[Double](1, 9, -1),
Array[Double](0, 1618, 1),
Array[Double](0, 1, 0)
), 4), // row-major, laid out as is
Seq(1, 1, 1, 1),
3
)
assert(testMat(::, Range(1, 2)).collect().toArray === Array(2, 9, 1618, 1))
assert(testMat(::, Range(1, 3)).collect().toArray === Array(2, 9, 1618, 1, 3, -1, 1, 0))
assert(testMat(Range(1, 2), ::).collect().toArray === Array(1, 9, -1))
assert(testMat(Range(0, 5), ::).collect().toArray === testMat.collect().toArray)
assert(testMat(Range(2, 3), Range(1, 2)).collect().toArray.head === 1618)
assert(testMat(Range(2, 3), Range(1, 3)).collect().toArray === Array(1618, 1))
assert(testMat(Range(2, 2), Range(1, 3)).collect().toArray.isEmpty)
}
test("collect") {
sc = new SparkContext("local", "test")
val matrixParts = (0 until 200).map { i =>
DenseMatrix.rand(50, 10)
}
val r = RowPartitionedMatrix.fromMatrix(sc.parallelize(matrixParts, 200))
val rL = matrixParts.reduceLeftOption((a, b) => DenseMatrix.vertcat(a, b)).getOrElse(new DenseMatrix[Double](0, 0))
val rD = r.collect()
assert(rL == rD)
}
test("reduceRowElements() with the original data in a DataFrame") {
sc = new SparkContext("local", "test")
val sqlContext = new SQLContext(sc)
val testRDD = sc.parallelize(Seq(
Array(1.0, 2.0, 3.0),
Array(1.0, 9.0, -1.0),
Array(0.0, 0.0, 1.0),
Array(0.0, 1.0, 0.0)
)).map(x => Row(x(0), x(1), x(2)))
val testSchema = StructType(
StructField("v1", DoubleType, true) ::
StructField("v2", DoubleType, true) ::
StructField("v3", DoubleType, true) :: Nil)
val testDF = sqlContext.createDataFrame(testRDD, testSchema)
val testMat = RowPartitionedMatrix.fromDataFrame(testDF)
val rowProducts = testMat.reduceRowElements(_ * _)
assert(rowProducts.collect().toArray === Array(6.0, -9.0, 0.0, 0.0),
"reduceRowElements() does not return correct answers!")
assert(rowProducts.numRows() === 4, "reduceRowElements() returns a result with incorrect row count!")
assert(rowProducts.numCols() === 1, "reduceRowElements() returns a result with incorrect col count!")
}
}
|
amplab/ml-matrix
|
src/test/scala/edu/berkeley/cs/amplab/mlmatrix/RowPartitionedMatrixSuite.scala
|
Scala
|
apache-2.0
| 4,810 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.keras
import java.io.{File, PrintWriter}
import com.intel.analytics.bigdl.tensor.Tensor
import scala.io.Source
import scala.sys.process._
sealed trait MainCodeType
object Loss extends MainCodeType
object Layer extends MainCodeType
object Regularizer extends MainCodeType
object KerasRunner {
// scalastyle:off
val code_head =
"""
|from keras.layers.core import *
|from keras.layers.convolutional import *
|from keras.layers import *
|from keras.objectives import *
|from keras.models import Model
|import keras.backend as K
|import numpy as np
|import tempfile
|
|np.random.seed(1337) # for reproducibility
|
|def create_tmp_path(name):
| tmp_file = tempfile.NamedTemporaryFile(prefix="UnitTest-keras-" + name + "-")
| tmp_file.close()
| return tmp_file.name
|
""".stripMargin
val code_for_loss =
"""
|grad_input = K.get_session().run(K.gradients(loss, [input_tensor]),
| feed_dict={input_tensor: input, target_tensor: Y})
|output = K.get_session().run(loss, feed_dict={input_tensor: input, target_tensor: Y})
|weights = []
|grad_weight = []
""".stripMargin
val code_for_layer =
"""
|Y = []
|output = model.predict(input)
|
|grad_input = K.get_session().run(K.gradients(model.output * output, model.input), feed_dict={input_tensor: input}) # grad_input
|
|grad_weight = K.get_session().run(K.gradients(model.output * output, model.trainable_weights), # grad_weight
| feed_dict={input_tensor: input})
|weights = model.get_weights()
""".stripMargin
val code_for_save = """
|result_list = []
|for item in [("weights", weights), ("input", input), ("target", Y), ("grad_input", grad_input), ("grad_weight", grad_weight), ("output",output)]:
| if isinstance(item[1], list):
| if len(item[1]) > 1:
| for i in range(len(item[1])):
| result_list.append((item[0] + "_" + str(i), item[1][i]))
| elif len(item[1]) == 1:
| result_list.append((item[0], item[1][0]))
| else:
| continue
| else:
| result_list.append(item)
|for result in result_list:
| value_path = create_tmp_path(result[0] + "_value")
| shape_path = create_tmp_path(result[0] + "_shape")
| np.savetxt(shape_path, result[1].shape)
| np.savetxt(value_path, result[1].ravel())
| print(shape_path)
| print(value_path)
|
|
""".stripMargin
val code_for_regularizer =
"""
|Y = K.get_session().run(model.losses, feed_dict={input_tensor: input})
|output = model.predict(input)
|grad_input = K.get_session().run(K.gradients(model.losses, [input_tensor]),
| feed_dict={input_tensor: input})
|grad_input += output # they're two branches, we should gather them.
|weights = []
|grad_weight = []
""".stripMargin
// scalastyle:on
private def getWeightRelate(pvalues: Map[String, Array[Float]],
keyName: String): Array[Tensor[Float]] = {
if (!pvalues.keySet.filter(key => key.contains(keyName)).isEmpty) {
val weightNum = pvalues.keySet.filter(key => key.contains(keyName)).size / 2
Range(0, weightNum).map {i =>
Tensor[Float](
data = pvalues(s"${keyName}_${i}_value"),
shape = pvalues(s"${keyName}_${i}_shape").map(_.toInt))
}.toArray
} else {
null
}
}
private def getNoneWeightRelate(pvalues: Map[String, Array[Float]],
keyName: String): Tensor[Float] = {
if (!pvalues.keySet.filter(key => key.contains(keyName)).isEmpty) {
Tensor[Float](
data = pvalues(s"${keyName}_value"),
shape = pvalues(s"${keyName}_shape").map(_.toInt))
} else {
null
}
}
// return: (grad_input, grad_weight, weights, input, target, output)
def run(code: String, codeType: MainCodeType = Layer): (Tensor[Float], Array[Tensor[Float]],
Array[Tensor[Float]], Tensor[Float], Tensor[Float], Tensor[Float]) = {
val pcodeFile = java.io.File.createTempFile("UnitTest", "keras")
val writer = new PrintWriter(pcodeFile)
writer.write(code_head)
writer.write(code)
writer.write(
codeType match {
case Layer => code_for_layer
case Loss => code_for_loss
case Regularizer => code_for_regularizer
})
writer.write(code_for_save)
writer.close()
val pcodeFileAbsPath = pcodeFile.getAbsolutePath
println("python code file: " + pcodeFileAbsPath)
val resultPaths = s"python ${pcodeFileAbsPath}".!!.split("\\n")
val pvalues = resultPaths.map {file =>
val value = Source.fromFile(file).getLines().map(_.toFloat).toArray
val key = file.split("-")(2)
key -> value
}.toMap
val grad_input = getNoneWeightRelate(pvalues, "grad_input")
val grad_weight = getWeightRelate(pvalues, "grad_weight")
val weights = getWeightRelate(pvalues, "weights")
val input = getNoneWeightRelate(pvalues, "input")
val target = getNoneWeightRelate(pvalues, "target")
var output = getNoneWeightRelate(pvalues, "output")
resultPaths.foreach {path =>
new File(path).delete()
}
if (pcodeFile.exists()) {
pcodeFile.delete()
}
(grad_input, grad_weight, weights, input, target, output)
}
}
|
qiuxin2012/BigDL
|
spark/dl/src/test/scala/com/intel/analytics/bigdl/keras/KerasRunner.scala
|
Scala
|
apache-2.0
| 6,207 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.features.avro
import java.io.{Closeable, InputStream}
import org.apache.avro.file.DataFileStream
import org.locationtech.geomesa.features.SerializationOption.SerializationOptions
import org.locationtech.geomesa.utils.io.CloseWithLogging
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
class AvroDataFileReader(is: InputStream) extends Iterator[SimpleFeature] with Closeable {
private val datumReader = new FeatureSpecificReader(null, null, SerializationOptions.withUserData)
private val dfs = new DataFileStream[AvroSimpleFeature](is, datumReader)
if (!AvroDataFile.canParse(dfs)) {
CloseWithLogging(dfs)
throw new IllegalArgumentException(s"Only version ${AvroDataFile.Version} data files supported")
}
private val sft = AvroDataFile.getSft(dfs)
private val schema = dfs.getSchema
datumReader.setSchema(schema)
datumReader.setTypes(sft, sft)
def getSft: SimpleFeatureType = sft
override def hasNext: Boolean = dfs.hasNext
override def next(): SimpleFeature = dfs.next()
override def close(): Unit = dfs.close()
}
|
ddseapy/geomesa
|
geomesa-features/geomesa-feature-avro/src/main/scala/org/locationtech/geomesa/features/avro/AvroDataFileReader.scala
|
Scala
|
apache-2.0
| 1,575 |
/*
* Copyright 2015 org.NLP4L
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.nlp4l.framework.builtin
import com.typesafe.config.Config
import scala.collection.mutable.ListBuffer
import scala.concurrent.Await
import scala.util.{ Try, Success, Failure }
import org.nlp4l.framework.dao.JobDAO
import org.nlp4l.framework.dao.RunDAO
import org.nlp4l.framework.models.Dictionary
import org.nlp4l.framework.models.DictionaryAttribute
import org.nlp4l.framework.models.Record
import play.api.Logger
import org.nlp4l.framework.processors.RecordProcessor
import org.nlp4l.framework.processors.ProcessorFactory
import org.nlp4l.framework.processors.Processor
/**
* Sort processor factory
*/
class SortProcessorFactory(settings: Config) extends ProcessorFactory(settings) {
override def getInstance: Processor = {
new SortProcessor(getStrParam("cellname", "id"), getStrParam("order", "asc"))
}
}
/**
* Sort Processor
*
* @param key Sort key name
* @param order Sort order, "desc", "asc"
*/
final class SortProcessor(val key: String, val order: String) extends Processor {
private val logger = Logger(this.getClass)
def sort(jobDAO: JobDAO, runDAO: RunDAO, jobId: Int, runId: Int, dicAttr: DictionaryAttribute, dic: Option[Dictionary]): Option[Dictionary] = {
var out:Option[Dictionary] = dic
val tmpRunId: Int = runId + 1000000
dic map { d => {
val f1 = runDAO.createTable(jobId, tmpRunId, dicAttr)
Await.ready(f1, scala.concurrent.duration.Duration.Inf)
f1.value.get match {
case Success(n) => runDAO.insertData(jobId, tmpRunId, dicAttr, d)
case Failure(ex) => throw(ex)
}
var newout:Dictionary = runDAO.fetchAll(jobId, tmpRunId, key, order)
out = Some(newout)
val f2 = runDAO.dropTable(jobId, tmpRunId)
Await.ready(f2, scala.concurrent.duration.Duration.Inf)
      f2.value.get match {
case Success(n) => 0
case Failure(ex) => logger.warn(ex.getMessage)
}
}
}
out
}
}
/**
* Merge processor factory
*/
class MergeProcessorFactory(settings: Config) extends ProcessorFactory(settings) {
override def getInstance: Processor = {
new MergeProcessor(getStrParam("cellname", ""), getStrParam("glue", ""))
}
}
/**
* Merge Processor
*
* @param key Merge key name
* @param glue string to concatenate
*/
final class MergeProcessor(val key: String, val glue: String) extends Processor {
private val logger = Logger(this.getClass)
def merge(dicAttr: DictionaryAttribute, dic: Option[Dictionary]): Option[Dictionary] = {
var out:Option[Dictionary] = dic
dic map { d =>
var reclist: Seq[Record] = Seq()
var prevRecord: Record = null
d.recordList foreach {rec: Record =>
if(prevRecord != null && rec.canMerge(key, prevRecord)) {
reclist = reclist.init
val merged = rec.merge(key, glue, prevRecord)
reclist = reclist :+ merged
prevRecord = merged
} else {
reclist = reclist :+ rec
prevRecord = rec
}
}
out = Some(Dictionary(reclist))
}
out
}
}
/**
* Replay processor
* This class is to mark that the processors must apply the replay data to dictionary
*/
class ReplayProcessorFactory(settings: Config) extends ProcessorFactory(settings) {
override def getInstance: Processor = {
new ReplayProcessor()
}
}
final class ReplayProcessor extends Processor {
def replay(jobDAO: JobDAO, runDAO: RunDAO, jobId: Int, dicAttr: DictionaryAttribute, dic: Option[Dictionary]): Option[Dictionary] = {
val recordList = ListBuffer.empty[Record]
dic map { d =>
d.recordList foreach { r: Record =>
val hashcode: Int = r.hashCode
if(dicAttr.modifiedRecordList.contains(hashcode)) {
dicAttr.modifiedRecordList.get(hashcode) map { modr =>
recordList += modr
}
} else if(!dicAttr.deletedRecordList.contains(hashcode)) {
recordList += r
}
}
dicAttr.addedRecordList foreach { r =>
recordList += r._2
}
Dictionary(recordList)
}
}
}
|
fubuki/nlp4l
|
app/org/nlp4l/framework/builtin/InternalProcessors.scala
|
Scala
|
apache-2.0
| 4,689 |
/*
* Copyright (c) 2013-2014 Telefónica Investigación y Desarrollo S.A.U.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.tid.cosmos.api.controllers.infinity
import scalaz._
import com.typesafe.config.Config
import com.wordnik.swagger.annotations._
import play.api.libs.json.Json
import play.api.mvc.{Action, Controller, Results}
import es.tid.cosmos.api.controllers.common._
import es.tid.cosmos.api.profile.{ApiCredentials, ClusterSecret}
import es.tid.cosmos.api.profile.dao.{ClusterDataStore, ProfileDataStore}
import es.tid.cosmos.servicemanager.ServiceManager
@Api(value = "/infinity/v1/auth", listingPath = "/doc/infinity/v1/auth",
description = "Authenticate users credentials for Infinity")
class InfinityAuthenticationResource(
store: ProfileDataStore with ClusterDataStore,
serviceManager: ServiceManager,
config: Config) extends Controller {
private val authenticator = new InfinityAuthenticator(store, serviceManager)
private val requestAuthentication = new InfinityRequestAuthentication(config)
import Scalaz._
import InfinityAuthenticationResource._
@ApiOperation(value = "Authenticate users credentials for Infinity", httpMethod = "GET",
responseClass = "es.tid.cosmos.api.controllers.infinity.InfinityIdentity", notes ="""
Lookup an identity from either an API pair (key and secret) or a cluster secret. Be careful
to fill in the query string parameters for exactly one of the two options.""")
@ApiErrors(Array(
new ApiError(code = 400, reason = "If the identity can't be found"),
new ApiError(code = 400,
reason = "When other than just the pair API key/secret of cluster secret is present"),
new ApiError(code = 401, reason = "Request lacks a basic authorization header"),
new ApiError(code = 401, reason = "Invalid authentication credentials")
))
def authenticate(
@ApiParam(name="API key")
apiKey: Option[String],
@ApiParam(name="API secret")
apiSecret: Option[String],
@ApiParam(name="Cluster secret")
clusterSecret: Option[String]) = Action { implicit request =>
for {
_ <- requestAuthentication.requireAuthorized(request)
identity <- authenticateFromParameters(apiKey, apiSecret, clusterSecret)
} yield Ok(Json.toJson(identity))
}
private def authenticateFromParameters(
apiKey: Option[String],
apiSecret: Option[String],
clusterSecret: Option[String]): ActionValidation[InfinityIdentity] =
(apiKey, apiSecret, clusterSecret) match {
case (None, None, Some(secret)) => authenticateClusterSecret(secret)
case (Some(key), Some(secret), None) => authenticateApiCredentials(key, secret)
case _ => InvalidParametersResponse.failure
}
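  // Illustrative request shapes handled by authenticateFromParameters (path taken from the
  // @Api annotation above; parameter values are made up):
  //   GET /infinity/v1/auth?apiKey=AK&apiSecret=AS     -> authenticateApiCredentials
  //   GET /infinity/v1/auth?clusterSecret=CS           -> authenticateClusterSecret
  //   GET /infinity/v1/auth?apiKey=AK&clusterSecret=CS -> InvalidParametersResponse (400)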
private def authenticateClusterSecret(secret: String): ActionValidation[InfinityIdentity] = for {
clusterSecret <- exceptionAsBadRequest(ClusterSecret(secret))
identity <- authenticator.authenticateClusterSecret(clusterSecret).leftMap(notFoundResponse)
} yield identity
private def authenticateApiCredentials(
apiKey: String, apiSecret: String): ActionValidation[InfinityIdentity] = for {
credentials <- exceptionAsBadRequest(ApiCredentials(apiKey, apiSecret))
identity <- authenticator.authenticateApiCredentials(credentials).leftMap(notFoundResponse)
} yield identity
private def exceptionAsBadRequest[T](block: => T): ActionValidation[T] =
Validation.fromTryCatch(block).leftMap(ex => BadRequest(Json.toJson(Message(ex.getMessage))))
private def notFoundResponse(message: Message) = NotFound(Json.toJson(message))
}
private object InfinityAuthenticationResource extends Results {
val InvalidParametersResponse = BadRequest(Json.toJson(Message("Invalid parameters. " +
"Fill in either apiKey/apiSecret or clusterSecret but not both")))
}
|
telefonicaid/fiware-cosmos-platform
|
cosmos-api/app/es/tid/cosmos/api/controllers/infinity/InfinityAuthenticationResource.scala
|
Scala
|
apache-2.0
| 4,350 |
package de.sciss.fscape
import de.sciss.kollflitz
import de.sciss.kollflitz.Vec
import de.sciss.fscape.Ops._
import scala.concurrent.Promise
class DifferentiateSpec extends UGenSpec {
"The Differentiate UGen" should "work as intended" in {
for {
len <- Seq(0, 1, 10, 63, 64, 65)
} {
val p = Promise[Vec[Double]]()
val r = new util.Random(2L)
val inSq = Vector.fill(len)(r.nextDouble())
val g = Graph {
import graph._
val in = ValueDoubleSeq(inSq: _*)
val d = in.differentiate
DebugDoublePromise(d, p)
}
runGraph(g, 64)
import kollflitz.Ops._
val res = asD(getPromiseVec(p))
val exp = if (inSq.isEmpty) Vector.empty else inSq.head +: inSq.differentiate
difOk(res, exp, s"len $len")
}
}
}
|
Sciss/FScape-next
|
core/jvm/src/test/scala/de/sciss/fscape/DifferentiateSpec.scala
|
Scala
|
agpl-3.0
| 817 |
import org.scalatest.FunSuite
import utils._
import scala.math._
class SetSuite extends FunSuite {
test("count1") {
val input = Array("+1 あ い い")
val clfr = new Classifier(input)
val expected = Map[(String, Cls), Int](("あ", Pos) -> 1, ("い", Pos) -> 2)
val actual = clfr.count
assert(expected === actual)
}
test("count2") {
val input = Array("+1 あ い", "-1 い う")
val clfr = new Classifier(input)
val expected = Map[(String, Cls), Int](("あ", Pos) -> 1,
("い", Pos) -> 1,
("い", Neg) -> 1,
("う", Neg) -> 1)
val actual = clfr.count
assert(expected === actual)
}
// test("count3"){
// val input = Array("+1 This pen is very good", "-1 This pen is bad", "-1 Too bad")
// val clfr = new Classifier(input)
//
// val expected = Map[(String, Cls), Int](("This", Pos) -> 1, ("pen", Pos) -> 1, ("is", Pos) -> 1, ("very", Pos) -> 1, ("good", Pos) -> 1, ("This", Neg) -> 1, ("pen", Neg) -> 1, ("is", Neg) -> 1, ("bad", Neg) -> 2, ("Too", Neg) -> 1)
// val actual = clfr.count
// assert(expected === actual)
// }
test("vocab") {
val input = Array("+1 あ い", "-1 い う")
val clfr = new Classifier(input)
    val expected = 3 + 1 // including unknown words
val actual = clfr.vocab_num
assert(expected === actual)
}
// test("probability1"){
// val input = Array("+1 This pen is very good", "-1 This pen is bad", "-1 Too bad")
// val clfr = new Classifier(input)
//
// val expected = Map[(String, Cls), Double](
// ("This", Pos) -> (1+1.0)/(5+7),
// ("pen", Pos) -> (1+1.0)/(5+7),
// ("is", Pos) -> (1+1.0)/(5+7),
// ("very", Pos) -> (1+1.0)/(5+7),
// ("good", Pos) -> (1+1.0)/(5+7),
// ("This", Neg) -> (1+1.0)/(6+7),
// ("pen", Neg) -> (1+1.0)/(6+7),
// ("is", Neg) -> (1+1.0)/(6+7),
// ("bad", Neg) -> (2+1.0)/(6+7),
// ("Too", Neg) -> (1+1.0)/(6+7))
// val actual = clfr.probability
// assert(expected === actual)
// }
test("classify1") {
val train = Array("+1 これ いいね", "-1 これ よくないね")
val clfr = new Classifier(train)
val input = Array("これ", "いいね")
val actual = clfr.classify(input)
val expected = Pos
assert(expected === actual)
}
test("sum of probability_pos") {
val input = Array("+1 あ い", "-1 い う")
val clfr = new Classifier(input)
val sum_pos_p = clfr.probability.filterKeys(p => p._2 == Pos).values.map(loged => pow(10.0, loged)).sum
assert(1.0 === sum_pos_p)
}
test("sum of probability_neg") {
val input = Array("+1 あ い", "-1 い う")
val clfr = new Classifier(input)
val sum_neg_p = clfr.probability.filterKeys(p => p._2 == Neg).values.map(loged => pow(10.0, loged)).sum
assert(1.0 === sum_neg_p)
}
}
|
sakabar/naiveBayesClassifierCL
|
src/test/scala/test.scala
|
Scala
|
gpl-3.0
| 2,869 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.io.{File, IOException}
import java.nio.file.{Files, NoSuchFileException}
import java.nio.file.attribute.FileTime
import java.util.concurrent.TimeUnit
import kafka.metrics.{KafkaMetricsGroup, KafkaTimer}
import kafka.server.epoch.LeaderEpochCache
import kafka.server.{FetchDataInfo, LogOffsetMetadata}
import kafka.utils._
import org.apache.kafka.common.errors.CorruptRecordException
import org.apache.kafka.common.record.FileRecords.LogOffsetPosition
import org.apache.kafka.common.record._
import org.apache.kafka.common.utils.Time
import scala.collection.JavaConverters._
import scala.math._
/**
 * A segment of the log. Each segment has two components: a log and an index. The log is a FileRecords containing
* the actual messages. The index is an OffsetIndex that maps from logical offsets to physical file positions. Each
* segment has a base offset which is an offset <= the least offset of any message in this segment and > any offset in
* any previous segment.
*
* A segment with a base offset of [base_offset] would be stored in two files, a [base_offset].index and a [base_offset].log file.
*
* @param log The message set containing log entries
* @param offsetIndex The offset index
* @param timeIndex The timestamp index
* @param baseOffset A lower bound on the offsets in this segment
* @param indexIntervalBytes The approximate number of bytes between entries in the index
* @param time The time instance
*/
@nonthreadsafe
class LogSegment private[log] (val log: FileRecords,
val offsetIndex: OffsetIndex,
val timeIndex: TimeIndex,
val txnIndex: TransactionIndex,
val baseOffset: Long,
val indexIntervalBytes: Int,
val rollJitterMs: Long,
val maxSegmentMs: Long,
val maxSegmentBytes: Int,
val time: Time) extends Logging {
def shouldRoll(messagesSize: Int, maxTimestampInMessages: Long, maxOffsetInMessages: Long, now: Long): Boolean = {
val reachedRollMs = timeWaitedForRoll(now, maxTimestampInMessages) > maxSegmentMs - rollJitterMs
size > maxSegmentBytes - messagesSize ||
(size > 0 && reachedRollMs) ||
offsetIndex.isFull || timeIndex.isFull || !canConvertToRelativeOffset(maxOffsetInMessages)
}
def resizeIndexes(size: Int): Unit = {
offsetIndex.resize(size)
timeIndex.resize(size)
}
def sanityCheck(timeIndexFileNewlyCreated: Boolean): Unit = {
if (offsetIndex.file.exists) {
offsetIndex.sanityCheck()
// Resize the time index file to 0 if it is newly created.
if (timeIndexFileNewlyCreated)
timeIndex.resize(0)
timeIndex.sanityCheck()
txnIndex.sanityCheck()
}
else throw new NoSuchFileException(s"Offset index file ${offsetIndex.file.getAbsolutePath} does not exist")
}
private var created = time.milliseconds
/* the number of bytes since we last added an entry in the offset index */
private var bytesSinceLastIndexEntry = 0
/* The timestamp we used for time based log rolling */
private var rollingBasedTimestamp: Option[Long] = None
/* The maximum timestamp we see so far */
@volatile private var maxTimestampSoFar: Long = timeIndex.lastEntry.timestamp
@volatile private var offsetOfMaxTimestamp: Long = timeIndex.lastEntry.offset
/* Return the size in bytes of this log segment */
def size: Int = log.sizeInBytes()
/**
* checks that the argument offset can be represented as an integer offset relative to the baseOffset.
*/
def canConvertToRelativeOffset(offset: Long): Boolean = {
(offset - baseOffset) <= Integer.MAX_VALUE
}
/**
* Append the given messages starting with the given offset. Add
* an entry to the index if needed.
*
* It is assumed this method is being called from within a lock.
*
* @param firstOffset The first offset in the message set.
* @param largestOffset The last offset in the message set
* @param largestTimestamp The largest timestamp in the message set.
* @param shallowOffsetOfMaxTimestamp The offset of the message that has the largest timestamp in the messages to append.
* @param records The log entries to append.
* @return the physical position in the file of the appended records
*/
@nonthreadsafe
def append(firstOffset: Long,
largestOffset: Long,
largestTimestamp: Long,
shallowOffsetOfMaxTimestamp: Long,
records: MemoryRecords): Unit = {
if (records.sizeInBytes > 0) {
trace("Inserting %d bytes at offset %d at position %d with largest timestamp %d at shallow offset %d"
.format(records.sizeInBytes, firstOffset, log.sizeInBytes(), largestTimestamp, shallowOffsetOfMaxTimestamp))
val physicalPosition = log.sizeInBytes()
if (physicalPosition == 0)
rollingBasedTimestamp = Some(largestTimestamp)
// append the messages
require(canConvertToRelativeOffset(largestOffset), "largest offset in message set can not be safely converted to relative offset.")
val appendedBytes = log.append(records)
trace(s"Appended $appendedBytes to ${log.file()} at offset $firstOffset")
// Update the in memory max timestamp and corresponding offset.
if (largestTimestamp > maxTimestampSoFar) {
maxTimestampSoFar = largestTimestamp
offsetOfMaxTimestamp = shallowOffsetOfMaxTimestamp
}
// append an entry to the index (if needed)
if(bytesSinceLastIndexEntry > indexIntervalBytes) {
offsetIndex.append(firstOffset, physicalPosition)
timeIndex.maybeAppend(maxTimestampSoFar, offsetOfMaxTimestamp)
bytesSinceLastIndexEntry = 0
}
bytesSinceLastIndexEntry += records.sizeInBytes
}
}
@nonthreadsafe
def updateTxnIndex(completedTxn: CompletedTxn, lastStableOffset: Long) {
if (completedTxn.isAborted) {
trace(s"Writing aborted transaction $completedTxn to transaction index, last stable offset is $lastStableOffset")
txnIndex.append(new AbortedTxn(completedTxn, lastStableOffset))
}
}
private def updateProducerState(producerStateManager: ProducerStateManager, batch: RecordBatch): Unit = {
if (batch.hasProducerId) {
val producerId = batch.producerId
val appendInfo = producerStateManager.prepareUpdate(producerId, isFromClient = false)
val maybeCompletedTxn = appendInfo.append(batch)
producerStateManager.update(appendInfo)
maybeCompletedTxn.foreach { completedTxn =>
val lastStableOffset = producerStateManager.completeTxn(completedTxn)
updateTxnIndex(completedTxn, lastStableOffset)
}
}
producerStateManager.updateMapEndOffset(batch.lastOffset + 1)
}
/**
* Find the physical file position for the first message with offset >= the requested offset.
*
* The startingFilePosition argument is an optimization that can be used if we already know a valid starting position
* in the file higher than the greatest-lower-bound from the index.
*
* @param offset The offset we want to translate
* @param startingFilePosition A lower bound on the file position from which to begin the search. This is purely an optimization and
* when omitted, the search will begin at the position in the offset index.
* @return The position in the log storing the message with the least offset >= the requested offset and the size of the
* message or null if no message meets this criteria.
*/
@threadsafe
private[log] def translateOffset(offset: Long, startingFilePosition: Int = 0): LogOffsetPosition = {
val mapping = offsetIndex.lookup(offset)
log.searchForOffsetWithSize(offset, max(mapping.position, startingFilePosition))
}
/**
* Read a message set from this segment beginning with the first offset >= startOffset. The message set will include
* no more than maxSize bytes and will end before maxOffset if a maxOffset is specified.
*
* @param startOffset A lower bound on the first offset to include in the message set we read
* @param maxSize The maximum number of bytes to include in the message set we read
* @param maxOffset An optional maximum offset for the message set we read
* @param maxPosition The maximum position in the log segment that should be exposed for read
* @param minOneMessage If this is true, the first message will be returned even if it exceeds `maxSize` (if one exists)
*
* @return The fetched data and the offset metadata of the first message whose offset is >= startOffset,
* or null if the startOffset is larger than the largest offset in this log
*/
@threadsafe
def read(startOffset: Long, maxOffset: Option[Long], maxSize: Int, maxPosition: Long = size,
minOneMessage: Boolean = false): FetchDataInfo = {
if (maxSize < 0)
throw new IllegalArgumentException("Invalid max size for log read (%d)".format(maxSize))
val logSize = log.sizeInBytes // this may change, need to save a consistent copy
val startOffsetAndSize = translateOffset(startOffset)
// if the start position is already off the end of the log, return null
if (startOffsetAndSize == null)
return null
val startPosition = startOffsetAndSize.position
val offsetMetadata = new LogOffsetMetadata(startOffset, this.baseOffset, startPosition)
val adjustedMaxSize =
if (minOneMessage) math.max(maxSize, startOffsetAndSize.size)
else maxSize
// return a log segment but with zero size in the case below
if (adjustedMaxSize == 0)
return FetchDataInfo(offsetMetadata, MemoryRecords.EMPTY)
// calculate the length of the message set to read based on whether or not they gave us a maxOffset
val fetchSize: Int = maxOffset match {
case None =>
// no max offset, just read until the max position
min((maxPosition - startPosition).toInt, adjustedMaxSize)
case Some(offset) =>
// there is a max offset, translate it to a file position and use that to calculate the max read size;
// when the leader of a partition changes, it's possible for the new leader's high watermark to be less than the
// true high watermark in the previous leader for a short window. In this window, if a consumer fetches on an
// offset between new leader's high watermark and the log end offset, we want to return an empty response.
if (offset < startOffset)
return FetchDataInfo(offsetMetadata, MemoryRecords.EMPTY, firstEntryIncomplete = false)
val mapping = translateOffset(offset, startPosition)
val endPosition =
if (mapping == null)
logSize // the max offset is off the end of the log, use the end of the file
else
mapping.position
min(min(maxPosition, endPosition) - startPosition, adjustedMaxSize).toInt
}
FetchDataInfo(offsetMetadata, log.read(startPosition, fetchSize),
firstEntryIncomplete = adjustedMaxSize < startOffsetAndSize.size)
}
def fetchUpperBoundOffset(startOffsetPosition: OffsetPosition, fetchSize: Int): Option[Long] =
offsetIndex.fetchUpperBoundOffset(startOffsetPosition, fetchSize).map(_.offset)
/**
* Run recovery on the given segment. This will rebuild the index from the log file and lop off any invalid bytes
* from the end of the log and index.
*
* @param producerStateManager Producer state corresponding to the segment's base offset. This is needed to recover
* the transaction index.
* @param leaderEpochCache Optionally a cache for updating the leader epoch during recovery.
* @return The number of bytes truncated from the log
*/
@nonthreadsafe
def recover(producerStateManager: ProducerStateManager, leaderEpochCache: Option[LeaderEpochCache] = None): Int = {
offsetIndex.reset()
timeIndex.reset()
txnIndex.reset()
var validBytes = 0
var lastIndexEntry = 0
maxTimestampSoFar = RecordBatch.NO_TIMESTAMP
try {
for (batch <- log.batches.asScala) {
batch.ensureValid()
// The max timestamp is exposed at the batch level, so no need to iterate the records
if (batch.maxTimestamp > maxTimestampSoFar) {
maxTimestampSoFar = batch.maxTimestamp
offsetOfMaxTimestamp = batch.lastOffset
}
// Build offset index
if (validBytes - lastIndexEntry > indexIntervalBytes) {
val startOffset = batch.baseOffset
offsetIndex.append(startOffset, validBytes)
timeIndex.maybeAppend(maxTimestampSoFar, offsetOfMaxTimestamp)
lastIndexEntry = validBytes
}
validBytes += batch.sizeInBytes()
if (batch.magic >= RecordBatch.MAGIC_VALUE_V2) {
leaderEpochCache.foreach { cache =>
if (batch.partitionLeaderEpoch > cache.latestEpoch()) // this is to avoid unnecessary warning in cache.assign()
cache.assign(batch.partitionLeaderEpoch, batch.baseOffset)
}
updateProducerState(producerStateManager, batch)
}
}
} catch {
case e: CorruptRecordException =>
warn("Found invalid messages in log segment %s at byte offset %d: %s."
.format(log.file.getAbsolutePath, validBytes, e.getMessage))
}
val truncated = log.sizeInBytes - validBytes
if (truncated > 0)
debug(s"Truncated $truncated invalid bytes at the end of segment ${log.file.getAbsoluteFile} during recovery")
log.truncateTo(validBytes)
offsetIndex.trimToValidSize()
// A normally closed segment always appends the biggest timestamp ever seen into log segment, we do this as well.
timeIndex.maybeAppend(maxTimestampSoFar, offsetOfMaxTimestamp, skipFullCheck = true)
timeIndex.trimToValidSize()
truncated
}
private def loadLargestTimestamp() {
// Get the last time index entry. If the time index is empty, it will return (-1, baseOffset)
val lastTimeIndexEntry = timeIndex.lastEntry
maxTimestampSoFar = lastTimeIndexEntry.timestamp
offsetOfMaxTimestamp = lastTimeIndexEntry.offset
val offsetPosition = offsetIndex.lookup(lastTimeIndexEntry.offset)
// Scan the rest of the messages to see if there is a larger timestamp after the last time index entry.
val maxTimestampOffsetAfterLastEntry = log.largestTimestampAfter(offsetPosition.position)
if (maxTimestampOffsetAfterLastEntry.timestamp > lastTimeIndexEntry.timestamp) {
maxTimestampSoFar = maxTimestampOffsetAfterLastEntry.timestamp
offsetOfMaxTimestamp = maxTimestampOffsetAfterLastEntry.offset
}
}
def collectAbortedTxns(fetchOffset: Long, upperBoundOffset: Long): TxnIndexSearchResult =
txnIndex.collectAbortedTxns(fetchOffset, upperBoundOffset)
override def toString = "LogSegment(baseOffset=" + baseOffset + ", size=" + size + ")"
/**
* Truncate off all index and log entries with offsets >= the given offset.
* If the given offset is larger than the largest message in this segment, do nothing.
*
* @param offset The offset to truncate to
* @return The number of log bytes truncated
*/
@nonthreadsafe
def truncateTo(offset: Long): Int = {
// Do offset translation before truncating the index to avoid needless scanning
// in case we truncate the full index
val mapping = translateOffset(offset)
offsetIndex.truncateTo(offset)
timeIndex.truncateTo(offset)
txnIndex.truncateTo(offset)
// After truncation, reset and allocate more space for the (new currently active) index
offsetIndex.resize(offsetIndex.maxIndexSize)
timeIndex.resize(timeIndex.maxIndexSize)
val bytesTruncated = if (mapping == null) 0 else log.truncateTo(mapping.position)
if (log.sizeInBytes == 0) {
created = time.milliseconds
rollingBasedTimestamp = None
}
bytesSinceLastIndexEntry = 0
if (maxTimestampSoFar >= 0)
loadLargestTimestamp()
bytesTruncated
}
/**
* Calculate the offset that would be used for the next message to be append to this segment.
* Note that this is expensive.
*/
@threadsafe
def readNextOffset: Long = {
val ms = read(offsetIndex.lastOffset, None, log.sizeInBytes)
if (ms == null)
baseOffset
else
ms.records.batches.asScala.lastOption
.map(_.nextOffset)
.getOrElse(baseOffset)
}
/**
* Flush this log segment to disk
*/
@threadsafe
def flush() {
LogFlushStats.logFlushTimer.time {
log.flush()
offsetIndex.flush()
timeIndex.flush()
txnIndex.flush()
}
}
/**
* Update the directory reference for the log and indices in this segment. This would typically be called after a
* directory is renamed.
*/
def updateDir(dir: File): Unit = {
log.setFile(new File(dir, log.file.getName))
offsetIndex.file = new File(dir, offsetIndex.file.getName)
timeIndex.file = new File(dir, timeIndex.file.getName)
txnIndex.file = new File(dir, txnIndex.file.getName)
}
/**
* Change the suffix for the index and log file for this log segment
* IOException from this method should be handled by the caller
*/
def changeFileSuffixes(oldSuffix: String, newSuffix: String) {
log.renameTo(new File(CoreUtils.replaceSuffix(log.file.getPath, oldSuffix, newSuffix)))
offsetIndex.renameTo(new File(CoreUtils.replaceSuffix(offsetIndex.file.getPath, oldSuffix, newSuffix)))
timeIndex.renameTo(new File(CoreUtils.replaceSuffix(timeIndex.file.getPath, oldSuffix, newSuffix)))
txnIndex.renameTo(new File(CoreUtils.replaceSuffix(txnIndex.file.getPath, oldSuffix, newSuffix)))
}
/**
* Append the largest time index entry to the time index and trim the log and indexes.
*
* The time index entry appended will be used to decide when to delete the segment.
*/
def onBecomeInactiveSegment() {
timeIndex.maybeAppend(maxTimestampSoFar, offsetOfMaxTimestamp, skipFullCheck = true)
offsetIndex.trimToValidSize()
timeIndex.trimToValidSize()
log.trim()
}
/**
* The time this segment has waited to be rolled.
* If the first message batch has a timestamp we use its timestamp to determine when to roll a segment. A segment
* is rolled if the difference between the new batch's timestamp and the first batch's timestamp exceeds the
* segment rolling time.
* If the first batch does not have a timestamp, we use the wall clock time to determine when to roll a segment. A
* segment is rolled if the difference between the current wall clock time and the segment create time exceeds the
* segment rolling time.
*/
def timeWaitedForRoll(now: Long, messageTimestamp: Long) : Long = {
// Load the timestamp of the first message into memory
if (rollingBasedTimestamp.isEmpty) {
val iter = log.batches.iterator()
if (iter.hasNext)
rollingBasedTimestamp = Some(iter.next().maxTimestamp)
}
rollingBasedTimestamp match {
case Some(t) if t >= 0 => messageTimestamp - t
case _ => now - created
}
}
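  // Illustrative comment (numbers invented): if the first batch in the segment carries
  // timestamp 1000 and a new batch arrives with timestamp 62000, timeWaitedForRoll returns
  // 61000, which exceeds maxSegmentMs = 60000 (minus any roll jitter), so shouldRoll()
  // reports that the segment is due to roll. If the first batch has no timestamp, the
  // wall-clock age (now - created) is used instead.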
/**
* Search the message offset based on timestamp and offset.
*
* This method returns an option of TimestampOffset. The returned value is determined using the following ordered list of rules:
*
* - If all the messages in the segment have smaller offsets, return None
* - If all the messages in the segment have smaller timestamps, return None
* - If all the messages in the segment have larger timestamps, or no message in the segment has a timestamp
   *   the returned offset will be max(the base offset of the segment, startingOffset) and the timestamp will be Message.NoTimestamp.
   * - Otherwise, return an option of TimestampOffset. The offset is the offset of the first message whose timestamp
   *   is greater than or equal to the target timestamp and whose offset is greater than or equal to the startingOffset.
*
   * This method only returns None when 1) all messages' offsets < startingOffset or 2) the log is not empty but we did not
* see any message when scanning the log from the indexed position. The latter could happen if the log is truncated
* after we get the indexed position but before we scan the log from there. In this case we simply return None and the
* caller will need to check on the truncated log and maybe retry or even do the search on another log segment.
*
* @param timestamp The timestamp to search for.
* @param startingOffset The starting offset to search.
* @return the timestamp and offset of the first message that meets the requirements. None will be returned if there is no such message.
*/
def findOffsetByTimestamp(timestamp: Long, startingOffset: Long = baseOffset): Option[TimestampOffset] = {
// Get the index entry with a timestamp less than or equal to the target timestamp
val timestampOffset = timeIndex.lookup(timestamp)
val position = offsetIndex.lookup(math.max(timestampOffset.offset, startingOffset)).position
// Search the timestamp
Option(log.searchForTimestamp(timestamp, position, startingOffset)).map { timestampAndOffset =>
TimestampOffset(timestampAndOffset.timestamp, timestampAndOffset.offset)
}
}
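// Illustrative example of the rules above (values made up, not part of the original file): for a
// segment holding messages with (timestamp, offset) pairs (100, 0), (200, 1) and (300, 2),
// findOffsetByTimestamp(250) yields Some(TimestampOffset(300, 2)), while findOffsetByTimestamp(400)
// yields None because every message has a smaller timestamp.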
/**
* Close this log segment
*/
def close() {
CoreUtils.swallow(timeIndex.maybeAppend(maxTimestampSoFar, offsetOfMaxTimestamp, skipFullCheck = true), this)
CoreUtils.swallow(offsetIndex.close(), this)
CoreUtils.swallow(timeIndex.close(), this)
CoreUtils.swallow(log.close(), this)
CoreUtils.swallow(txnIndex.close(), this)
}
/**
* Close file handlers used by the log segment but don't write to disk. This is used when the disk may have failed
*/
def closeHandlers() {
CoreUtils.swallow(offsetIndex.closeHandler(), this)
CoreUtils.swallow(timeIndex.closeHandler(), this)
CoreUtils.swallow(log.closeHandlers(), this)
CoreUtils.swallow(txnIndex.close(), this)
}
/**
* Delete this log segment from the filesystem.
*/
def deleteIfExists() {
def delete(delete: () => Boolean, fileType: String, file: File, logIfMissing: Boolean): Unit = {
try {
if (delete())
info(s"Deleted $fileType ${file.getAbsolutePath}.")
else if (logIfMissing)
info(s"Failed to delete $fileType ${file.getAbsolutePath} because it does not exist.")
}
catch {
case e: IOException => throw new IOException(s"Delete of $fileType ${file.getAbsolutePath} failed.", e)
}
}
CoreUtils.tryAll(Seq(
() => delete(log.deleteIfExists _, "log", log.file, logIfMissing = true),
() => delete(offsetIndex.deleteIfExists _, "offset index", offsetIndex.file, logIfMissing = true),
() => delete(timeIndex.deleteIfExists _, "time index", timeIndex.file, logIfMissing = true),
() => delete(txnIndex.deleteIfExists _, "transaction index", txnIndex.file, logIfMissing = false)
))
}
/**
* The last modified time of this log segment as a unix time stamp
*/
def lastModified = log.file.lastModified
/**
* The largest timestamp this segment contains.
*/
def largestTimestamp = if (maxTimestampSoFar >= 0) maxTimestampSoFar else lastModified
/**
* Change the last modified time for this log segment
*/
def lastModified_=(ms: Long) = {
val fileTime = FileTime.fromMillis(ms)
Files.setLastModifiedTime(log.file.toPath, fileTime)
Files.setLastModifiedTime(offsetIndex.file.toPath, fileTime)
Files.setLastModifiedTime(timeIndex.file.toPath, fileTime)
}
}
object LogSegment {
def open(dir: File, baseOffset: Long, config: LogConfig, time: Time, fileAlreadyExists: Boolean = false,
initFileSize: Int = 0, preallocate: Boolean = false, fileSuffix: String = ""): LogSegment = {
val maxIndexSize = config.maxIndexSize
new LogSegment(
FileRecords.open(Log.logFile(dir, baseOffset, fileSuffix), fileAlreadyExists, initFileSize, preallocate),
new OffsetIndex(Log.offsetIndexFile(dir, baseOffset, fileSuffix), baseOffset = baseOffset, maxIndexSize = maxIndexSize),
new TimeIndex(Log.timeIndexFile(dir, baseOffset, fileSuffix), baseOffset = baseOffset, maxIndexSize = maxIndexSize),
new TransactionIndex(baseOffset, Log.transactionIndexFile(dir, baseOffset, fileSuffix)),
baseOffset,
indexIntervalBytes = config.indexInterval,
rollJitterMs = config.randomSegmentJitter,
maxSegmentMs = config.segmentMs,
maxSegmentBytes = config.segmentSize,
time)
}
}
object LogFlushStats extends KafkaMetricsGroup {
val logFlushTimer = new KafkaTimer(newTimer("LogFlushRateAndTimeMs", TimeUnit.MILLISECONDS, TimeUnit.SECONDS))
}
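// A minimal usage sketch, not part of the original file: it exercises only the factory and the
// lifecycle methods defined above. The directory and LogConfig are assumed to be supplied by the
// caller, and Time.SYSTEM (the system clock from the Kafka clients library) is assumed to be the
// Time implementation in scope here.
object LogSegmentUsageSketch {
  def openTrimAndClose(dir: File, config: LogConfig): Unit = {
    val segment = LogSegment.open(dir, baseOffset = 0L, config = config, time = Time.SYSTEM)
    try segment.onBecomeInactiveSegment() // appends the max-timestamp entry and trims the log and indexes
    finally segment.close()
  }
}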
|
MyPureCloud/kafka
|
core/src/main/scala/kafka/log/LogSegment.scala
|
Scala
|
apache-2.0
| 25,638 |
package mesosphere.marathon.integration
import mesosphere.marathon.integration.setup.{ MarathonClusterIntegrationTest, IntegrationFunSuite, WaitTestSupport }
import play.api.libs.json.Json
import scala.concurrent.duration._
import org.scalatest.{ GivenWhenThen, Matchers }
class LeaderIntegrationTest extends IntegrationFunSuite
with MarathonClusterIntegrationTest
with GivenWhenThen
with Matchers {
test("all nodes return the same leader") {
Given("a leader has been elected")
WaitTestSupport.waitUntil("a leader has been elected", 30.seconds) { marathon.leader().code == 200 }
When("calling /v2/leader on all nodes of a cluster")
val results = marathonFacades.map(marathon => marathon.leader())
Then("the requests should all be successful")
results.foreach(_.code should be (200))
And("they should all be the same")
results.map(_.value).distinct should have length 1
}
test("the leader abdicates when it receives a DELETE") {
Given("a leader")
WaitTestSupport.waitUntil("a leader has been elected", 30.seconds) { marathon.leader().code == 200 }
val leader = marathon.leader().value
When("calling DELETE /v2/leader")
val result = marathon.abdicate()
Then("the request should be successful")
result.code should be (200)
(result.entityJson \\ "message") should be (Json.toJson("Leadership abdicted"))
And("the leader must have changed")
WaitTestSupport.waitUntil("the leader changes", 30.seconds) { marathon.leader().value != leader }
}
}
|
sepiroth887/marathon
|
src/test/scala/mesosphere/marathon/integration/LeaderIntegrationTest.scala
|
Scala
|
apache-2.0
| 1,538 |
package blended.updater.config
case class FeatureRef(
name: String,
version: String,
url: Option[String] = None) {
override def toString(): String = s"${getClass().getSimpleName()}(name=${name},version=${version},url=${url})"
}
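// A small usage sketch, not part of the original file; the feature name, version and URL below are
// invented purely for illustration.
object FeatureRefSketch {
  val byNameAndVersion = FeatureRef(name = "example-feature", version = "1.0.0")
  val withExplicitUrl = FeatureRef("example-feature", "1.0.0", Some("http://repo.example.org/example-feature-1.0.0.conf"))
}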
|
lefou/blended
|
blended.updater.config/shared/src/main/scala/blended/updater/config/FeatureRef.scala
|
Scala
|
apache-2.0
| 239 |
package scala.collection
/** Contains types that increase access to some `private[scala]` members of
* `scala.collection` in order to make them reusable in any package.
*/
object Abstract {
type Traversable[A] = scala.collection.AbstractTraversable[A]
type Iterable[A] = scala.collection.AbstractIterable[A]
type Iterator[A] = scala.collection.AbstractIterator[A]
type Set[A] = scala.collection.AbstractSet[A]
}
object Util {
/** A power of 2 >= `target`.
*/
def powerOf2(target: Int): Int =
scala.collection.mutable.HashTable.powerOfTwo(target)
}
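// Illustrative sketch, not part of the original file: assuming HashTable.powerOfTwo behaves as the
// comment above states, powerOf2 rounds up to the next power of two and leaves exact powers unchanged.
object UtilSketch {
  val roundedUp = Util.powerOf2(100) // 128
  val unchanged = Util.powerOf2(128) // 128
}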
|
Calavoow/scala-graph
|
core/src/main/scala/scala/collection/Abstract.scala
|
Scala
|
bsd-3-clause
| 605 |
package scales.dom
import scala.scalajs.js
trait Observer extends js.Object {
def observe[A](obj: A, callback: js.Function1[js.Array[ChangeEvent], Unit]): A = js.native
def observe[A](obj: A, callback: js.Function1[js.Array[ChangeEvent], Unit], acceptList: js.Array[String]): A = js.native
def unobserve[A](obj: A, callback: js.Function1[js.Array[ChangeEvent], Unit]): A = js.native
def deliverChangeRecords(callback: js.Function1[js.Array[ChangeEvent], Unit]): Unit = js.native
}
trait ChangeEvent extends js.Object {
val name: String = js.native
val `object`: Any = js.native
val `type`: String = js.native
val oldValue: Any = js.native
}
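// Hypothetical usage note, not part of the original file: at runtime the native `Object` global is
// expected to provide these methods, so a caller would obtain it via something like
// js.Dynamic.global.Object.asInstanceOf[Observer] and register a js.Function1 callback that reads
// each ChangeEvent's `type`, `name` and `oldValue`.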
|
greencatsoft/scales
|
core/src/main/scala/scales/dom/Observer.scala
|
Scala
|
apache-2.0
| 668 |
package org.scalarules.dsl.core
import org.scalarules.finance.nl._
import org.scalarules.dsl.nl.grammar._
import TableSelectorGlossary._
import org.scalarules.dsl.nl.grammar.DslCondition._
import org.scalarules.dsl.nl.grammar.DslTableSelector.prikken
import org.scalarules.engine._
class TableSelectorBerekening extends {
} with Berekening (
Gegeven (altijd)
Bereken
ResultString is (prikken in TableFact met waarde(IndexX, IndexY)) en
ResultList is (prikken in TableFact met waardes(IndexXRange, IndexY))
)
|
scala-rules/rule-engine
|
engine/src/test/scala/org/scalarules/dsl/core/TableSelectorBerekening.scala
|
Scala
|
mit
| 524 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.attribute.{AttributeGroup, BinaryAttribute, NominalAttribute}
import org.apache.spark.ml.param.ParamsSuite
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions.col
/**
 * One-hot encoding turns a categorical feature with N distinct values into N numeric features;
 * after the transformation each numeric feature is 0 or 1, and exactly one of the N features is 1 while the rest are 0.
*/
class OneHotEncoderSuite extends SparkFunSuite with MLlibTestSparkContext {
def stringIndexed(): DataFrame = {
val data = sc.parallelize(Seq((0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "c")), 2)
val df = sqlContext.createDataFrame(data).toDF("id", "label")
/**
+---+-----+
| id|label|
+---+-----+
| 0| a|
| 1| b|
| 2| c|
| 3| a|
| 4| a|
| 5| c|
+---+-----+
*/
// a occurs 3 times, b once, c twice
df.show()
// StringIndexer index-encodes labels by occurrence count: a -> 0, c -> 1, b -> 2
val indexer = new StringIndexer().setInputCol("label").setOutputCol("labelIndex").fit(df)
/**
 * Labels are index-encoded by frequency of occurrence: a is encoded as 0.0, b as 2.0 and c as 1.0,
 * since index 0 goes to the most frequent label (a occurs 3 times, b once, c twice).
+---+-----+----------+
| id|label|labelIndex|
+---+-----+----------+
| 0| a| 0.0|
| 1| b| 2.0|
| 2| c| 1.0|
| 3| a| 0.0|
| 4| a| 0.0|
| 5| c| 1.0|
+---+-----+----------+*/
// transform() converts one DataFrame into another
indexer.transform(df).show()
indexer.transform(df)
}
test("params") {// parameters
ParamsSuite.checkParams(new OneHotEncoder)
}
test("OneHotEncoder dropLast = false") {
val transformed = stringIndexed()
// One-hot encoding: a feature with m possible values becomes m binary features, which makes the data sparse
val encoder = new OneHotEncoder().setInputCol("labelIndex").setOutputCol("labelVec").setDropLast(false)
// transform is mainly used to convert one DataFrame into another
val encoded = encoder.transform(transformed)
/**
 * i.e. (3,[0],[1.0]) is a sparse vector with three slots, of which the first is active
+---+-----+----------+-------------+
| id|label|labelIndex| labelVec|
+---+-----+----------+-------------+
| 0| a| 0.0|(3,[0],[1.0])|
| 1| b| 2.0|(3,[2],[1.0])|
| 2| c| 1.0|(3,[1],[1.0])|
| 3| a| 0.0|(3,[0],[1.0])|
| 4| a| 0.0|(3,[0],[1.0])|
| 5| c| 1.0|(3,[1],[1.0])|
+---+-----+----------+-------------+*/
encoded.show()
val output = encoded.select("id", "labelVec").map { r =>
val vec = r.getAs[Vector](1) // read the column as a Vector
(r.getInt(0), vec(0), vec(1), vec(2)) // indexing the vector yields the dense values
}.collect().toSet
// a -> 0, b -> 2, c -> 1, i.e. the vector positions correspond to [a, c, b]
// StringIndexer encodes labels by occurrence count: a -> 0 (3 occurrences), c -> 1 (2), b -> 2 (1)
// One-hot encoding the samples ["a","b","c"]: a maps to [1.0, 0.0, 0.0], b to [0.0, 0.0, 1.0], c to [0.0, 1.0, 0.0]
//(0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "c")
val expected = Set((0, 1.0, 0.0, 0.0), (1, 0.0, 0.0, 1.0), (2, 0.0, 1.0, 0.0),
(3, 1.0, 0.0, 0.0), (4, 1.0, 0.0, 0.0), (5, 0.0, 1.0, 0.0))
assert(output === expected)
}
// drop the last category
test("OneHotEncoder dropLast = true") {
val transformed = stringIndexed()
val encoder = new OneHotEncoder()
.setInputCol("labelIndex")
.setOutputCol("labelVec")
// transform() converts one DataFrame into another
val encoded = encoder.transform(transformed)
/**
+---+-----+----------+-------------+
| id|label|labelIndex| labelVec|
+---+-----+----------+-------------+
| 0| a| 0.0|(2,[0],[1.0])|
| 1| b| 2.0| (2,[],[])|
| 2| c| 1.0|(2,[1],[1.0])|
| 3| a| 0.0|(2,[0],[1.0])|
| 4| a| 0.0|(2,[0],[1.0])|
| 5| c| 1.0|(2,[1],[1.0])|
+---+-----+----------+-------------+*/
encoded.show()
val output = encoded.select("id", "labelVec").map { r =>
val vec = r.getAs[Vector](1) // read the column as a Vector
(r.getInt(0), vec(0), vec(1)) // indexing the vector yields the dense values
}.collect().toSet
// a -> 0, b -> 2, c -> 1, i.e. [a, c, b] with the last position dropped from the encoding
//(0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "c")
val expected = Set((0, 1.0, 0.0), (1, 0.0, 0.0), (2, 0.0, 1.0),
(3, 1.0, 0.0), (4, 1.0, 0.0), (5, 0.0, 1.0))
assert(output === expected)
}
test("input column with ML attribute") {// the input column carries an ML attribute
// the input column
val attr = NominalAttribute.defaultAttr.withValues("small", "medium", "large")
val df = sqlContext.createDataFrame(Seq(0.0, 1.0, 2.0, 1.0).map(Tuple1.apply)).toDF("size")
.select(col("size").as("size", attr.toMetadata()))
/**
+----+
|size|
+----+
| 0.0|
| 1.0|
| 2.0|
| 1.0|
+----+*/
df.show()
val encoder = new OneHotEncoder().setInputCol("size").setOutputCol("encoded")
// transform() converts one DataFrame into another
val output = encoder.transform(df)
/**
+----+-------------+
|size| encoded|
+----+-------------+
| 0.0|(2,[0],[1.0])|
| 1.0|(2,[1],[1.0])|
| 2.0| (2,[],[])|
| 1.0|(2,[1],[1.0])|
+----+-------------+*/
output.show()
val group = AttributeGroup.fromStructField(output.schema("encoded"))
assert(group.size === 2)
assert(group.getAttr(0) === BinaryAttribute.defaultAttr.withName("small").withIndex(0))
assert(group.getAttr(1) === BinaryAttribute.defaultAttr.withName("medium").withIndex(1))
}
test("input column without ML attribute") {// the input column has no ML attribute
val df = sqlContext.createDataFrame(Seq(0.0, 1.0, 2.0, 1.0).map(Tuple1.apply)).toDF("index")
val encoder = new OneHotEncoder().setInputCol("index").setOutputCol("encoded")
// transform() converts one DataFrame into another
val output = encoder.transform(df)
val group = AttributeGroup.fromStructField(output.schema("encoded"))
assert(group.size === 2)
assert(group.getAttr(0) === BinaryAttribute.defaultAttr.withName("0").withIndex(0))
assert(group.getAttr(1) === BinaryAttribute.defaultAttr.withName("1").withIndex(1))
}
}
|
tophua/spark1.52
|
mllib/src/test/scala/org/apache/spark/ml/feature/OneHotEncoderSuite.scala
|
Scala
|
apache-2.0
| 7,714 |
package sangria.cats
import sangria.util.CatsSupport
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
class ValidationSpec extends AnyWordSpec with Matchers with CatsSupport {
generateTests("validation")
}
|
OlegIlyenko/sangria
|
modules/core/src/test/scala/sangria/cats/ValidationSpec.scala
|
Scala
|
apache-2.0
| 251 |
/**
* Copyright (C) 2013 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fb
import org.dom4j.{Namespace, QName}
import org.orbeon.oxf.fr.FormRunner._
import org.orbeon.oxf.util.ScalaUtils._
import org.orbeon.oxf.xforms.XFormsConstants._
import org.orbeon.oxf.xforms.action.XFormsAPI._
import org.orbeon.oxf.xforms.analysis.controls.LHHAAnalysis._
import org.orbeon.oxf.xforms.analysis.model.Model
import org.orbeon.oxf.xforms.analysis.model.Model._
import org.orbeon.oxf.xforms.analysis.model.ValidationLevels._
import org.orbeon.oxf.xforms.function.xxforms.ValidationFunction
import org.orbeon.oxf.xforms.xbl.BindingDescriptor
import org.orbeon.oxf.xml.{XMLConstants, XMLUtils}
import org.orbeon.saxon.om.NodeInfo
import org.orbeon.scaxon.XML._
import scala.{xml ⇒ sx}
trait AlertsAndConstraintsOps extends ControlOps {
self: GridOps ⇒ // funky dependency, to resolve at some point
private val OldAlertRefMatcher = """\$form-resources/([^/]+)/alert(\[(\d+)\])?""".r
private val NewAlertRefMatcher = """xxf:r\('([^.]+)\.alert(\.(\d+))?'\)""".r
val OldStandardAlertRef = """$fr-resources/detail/labels/alert"""
// Return the first default alert for the given control, or a blank template if none exists
def readDefaultAlertAsXML(inDoc: NodeInfo, controlName: String): NodeInfo = (
AlertDetails.fromForm(inDoc, controlName)
find (_.default)
getOrElse AlertDetails(None, List(currentLang → ""), global = true)
toXML currentLang
)
// Return all validations as XML for the given control
def readValidationsAsXML(inDoc: NodeInfo, controlName: String): Array[NodeInfo] =
RequiredValidation.fromForm(inDoc, controlName) ::
DatatypeValidation.fromForm(inDoc, controlName) ::
ConstraintValidation.fromForm(inDoc, controlName) map
(v ⇒ elemToNodeInfo(v.toXML(currentLang))) toArray
// Write back everything
def writeAlertsAndValidationsAsXML(
inDoc : NodeInfo,
controlName : String,
newAppearance : String,
defaultAlertElem : NodeInfo,
validationElems : Array[NodeInfo]
): Unit = {
// Current resolutions, which could be lifted in the future:
//
// - writes are destructive: they remove all xf:alert, alert resources, and validations for the control
// - we don't allow editing the validation id, but we preserve it when possible
val validationElemsSeq = validationElems.to[List]
// Extract from XML
val allValidations = {
val idsIterator = nextIds(inDoc, "validation", validationElemsSeq.size).toIterator
validationElemsSeq map (v ⇒ v → (v attValue "type")) flatMap {
case (e, Required.name) ⇒ RequiredValidation.fromXML(e, idsIterator)
case (e, "datatype") ⇒ DatatypeValidation.fromXML(e, idsIterator, inDoc, controlName)
case (e, _) ⇒ ConstraintValidation.fromXML(e, idsIterator)
}
}
val defaultAlert = AlertDetails.fromXML(defaultAlertElem, None)
// We expect only one "required" validation
allValidations collectFirst {
case v: RequiredValidation ⇒ v
} foreach { v ⇒
writeValidations(
inDoc,
controlName,
Required,
List(v)
)
}
// We expect only one "datatype" validation
allValidations collect {
case v: DatatypeValidation ⇒ v
} foreach { v ⇒
v.renameControlIfNeeded(inDoc, controlName, newAppearance.trimAllToOpt)
writeValidations(
inDoc,
controlName,
Type,
List(v)
)
}
// Several "constraint" validations are supported
writeValidations(
inDoc,
controlName,
Constraint,
allValidations collect { case v: ConstraintValidation ⇒ v }
)
writeAlerts(
inDoc,
controlName,
allValidations,
defaultAlert
)
}
private def writeValidations(
inDoc : NodeInfo,
controlName : String,
mip : MIP,
validations : List[Validation]
): Unit = {
val bind = findBindByName(inDoc, controlName).get
val existingAttributeValidations = mipAtts (bind, mip)
val existingElementValidations = mipElems(bind, mip)
val (_, mipElemQName) = mipToFBMIPQNames(mip)
validations match {
case Nil ⇒
delete(existingAttributeValidations ++ existingElementValidations)
case List(Validation(_, ErrorLevel, value, None)) ⇒
// Single validation without custom alert: set @fb:mipAttName and remove all nested elements
// See also: https://github.com/orbeon/orbeon-forms/issues/1829
// NOTE: We could optimize further by taking this branch if there is no type or required validation.
updateMipAsAttributeOnly(inDoc, controlName, mip.name, value)
delete(existingElementValidations)
case _ ⇒
val nestedValidations =
validations flatMap { case Validation(idOpt, level, value, _) ⇒
value.trimAllToOpt match {
case Some(nonEmptyValue) ⇒
val prefix = mipElemQName.getNamespaceURI match {
case FB ⇒ "fb" // also covers the case of `xxf:default` (Form Builder names here)
case XF ⇒ "xf" // case of `xf:type`, `xf:required`
}
val dummyMIPElem =
<xf:dummy
id={idOpt.orNull}
level={if (level != ErrorLevel) level.name else null}
value={if (mip != Type) nonEmptyValue else null}
xmlns:xf={XF}
xmlns:fb={FB}>{if (mip == Type) nonEmptyValue else null}</xf:dummy>
List(dummyMIPElem.copy(prefix = prefix, label = mipElemQName.getName): NodeInfo)
case None ⇒
Nil
}
}
delete(existingAttributeValidations ++ existingElementValidations)
insertElementsImposeOrder(into = bind, origin = nestedValidations, AllMIPNamesInOrder)
}
}
// Write resources and alerts for those that have resources
// If the default alert has resources, write it as well
private def writeAlerts(
inDoc : NodeInfo,
controlName : String,
validations : List[Validation],
defaultAlert : AlertDetails
): Unit = {
val alertsWithResources = {
val alertsForValidations =
validations collect
{ case Validation(_, _, _, Some(alert)) ⇒ alert }
val nonGlobalDefaultAlert =
! defaultAlert.global list defaultAlert
alertsForValidations ::: nonGlobalDefaultAlert
}
val messagesByLangForAllLangs = {
def messagesForAllLangs(a: AlertDetails) = {
val messagesMap = a.messages.toMap
allLangs(resourcesRoot) map { lang ⇒ lang → messagesMap.getOrElse(lang, "") }
}
val messagesByLang = (
alertsWithResources
flatMap messagesForAllLangs
groupBy (_._1)
map { case (lang, values) ⇒ lang → (values map (_._2)) }
)
// Make sure we have a default for all languages if there are no alerts or if some languages are missing
// from the alerts. We do want to update all languages on write, including removing unneeded <alert>
// elements.
val defaultMessages = allLangs(resourcesRoot) map (_ → Nil)
defaultMessages.toMap ++ messagesByLang toList
}
setControlResourcesWithLang(controlName, "alert", messagesByLangForAllLangs)
// Write alerts
val newAlertElements =
ensureCleanLHHAElements(
inDoc = inDoc,
controlName = controlName,
lhha = "alert",
count = alertsWithResources.size,
replace = true
)
// Insert validation attribute as needed
newAlertElements zip alertsWithResources foreach {
case (e, AlertDetails(Some(forValidationId), _, _)) ⇒
insert(into = e, origin = attributeInfo(VALIDATION_QNAME, forValidationId))
case _ ⇒ // no attributes to insert if this is not an alert linked to a validation
}
// Write global default alert if needed
if (defaultAlert.global) {
val newGlobalAlert = ensureCleanLHHAElements(inDoc, controlName, "alert", count = 1, replace = false).head
setvalue(newGlobalAlert /@ "ref", OldStandardAlertRef)
}
}
sealed trait Validation {
def idOpt : Option[String]
def level : ValidationLevel
def stringValue : String
def alert : Option[AlertDetails]
def toXML(forLang: String): sx.Elem
}
object Validation {
def unapply(v: Validation) =
Some((v.idOpt, v.level, v.stringValue, v.alert))
def levelFromXML(validationElem: NodeInfo) =
LevelByName(validationElem attValue "level")
}
// Required is either a simple boolean or a custom XPath expression
case class RequiredValidation(
idOpt : Option[String],
required : Either[Boolean, String],
alert : Option[AlertDetails]
) extends Validation {
import RequiredValidation._
def level = ErrorLevel
def stringValue = eitherToXPath(required)
def toXML(forLang: String): sx.Elem =
<validation type={Required.name} level={level.name} default-alert={alert.isEmpty.toString}>
<required>{eitherToXPath(required)}</required>
{alertOrPlaceholder(alert, forLang)}
</validation>
}
object RequiredValidation {
val DefaultRequireValidation = RequiredValidation(None, Left(false), None)
def fromForm(inDoc: NodeInfo, controlName: String): RequiredValidation =
findMIPs(inDoc, controlName, Required).headOption map {
case (idOpt, _, value, alertOpt) ⇒
RequiredValidation(idOpt, xpathOptToEither(Some(value)), alertOpt)
} getOrElse
DefaultRequireValidation
def fromXML(validationElem: NodeInfo, newIds: Iterator[String]): Option[RequiredValidation] = {
require(validationElem /@ "type" === Required.name)
val validationIdOpt = validationElem.id.trimAllToOpt orElse Some(newIds.next())
val required = validationElem / Required.name stringValue
Some(
RequiredValidation(
validationIdOpt,
xpathOptToEither(required.trimAllToOpt),
AlertDetails.fromValidationXML(validationElem, validationIdOpt)
)
)
}
private def xpathOptToEither(opt: Option[String]): Either[Boolean, String] =
opt match {
case Some("true()") ⇒ Left(true)
case Some("false()") | None ⇒ Left(false) // normalize missing MIP to false()
case Some(xpath) ⇒ Right(xpath)
}
private def eitherToXPath(required: Either[Boolean, String]) =
required match {
case Left(true) ⇒ "true()"
case Left(false) ⇒ "false()"
case Right(xpath) ⇒ xpath
}
}
case class DatatypeValidation(
idOpt : Option[String],
datatype : Either[(QName, Boolean), QName],
alert : Option[AlertDetails]
) extends Validation {
val datatypeQName = datatype.fold(_._1, identity)
def level = ErrorLevel
def stringValue = XMLUtils.buildQName(datatypeQName.getNamespacePrefix, datatypeQName.getName)
// Rename control element if needed when the datatype changes
def renameControlIfNeeded(inDoc: NodeInfo, controlName: String, newAppearanceOpt: Option[String]): Unit = {
val newDatatype = datatypeQName
for {
controlElem ← findControlByName(inDoc, controlName)
oldDatatype = DatatypeValidation.fromForm(inDoc, controlName).datatypeQName
oldAppearances = controlElem attTokens APPEARANCE_QNAME
(newElemName, newAppearanceAttOpt) ← BindingDescriptor.newElementName(
controlElem.uriQualifiedName,
oldDatatype,
oldAppearances,
newDatatype,
newAppearanceOpt,
componentBindings
)
} locally {
// Q: If binding changes, what about instance and bind templates? Should also be updated? Not a
// concrete case as of now, but can happen depending on which bindings are available.
val newControlElem = rename(controlElem, newElemName)
toggleAttribute(newControlElem, APPEARANCE_QNAME, newAppearanceAttOpt)
}
}
def toXML(forLang: String): sx.Elem = {
val builtinTypeString = datatype match {
case Left((name, _)) ⇒ name.getName
case _ ⇒ ""
}
val builtinTypeRequired = datatype match {
case Left((_, required)) ⇒ required.toString
case _ ⇒ ""
}
<validation type="datatype" id={idOpt.orNull} level={level.name} default-alert={alert.isEmpty.toString}>
<builtin-type>{builtinTypeString}</builtin-type>
<builtin-type-required>{builtinTypeRequired}</builtin-type-required>
<schema-type>{datatype.right.toOption map (_.getQualifiedName) getOrElse ""}</schema-type>
{alertOrPlaceholder(alert, forLang)}
</validation>
}
}
object DatatypeValidation {
private val DefaultDataTypeValidation =
DatatypeValidation(None, Left(XMLConstants.XS_STRING_QNAME → false), None)
// Create from a control name
def fromForm(inDoc: NodeInfo, controlName: String): DatatypeValidation = {
val bind = findBindByName(inDoc, controlName).get // require the bind
def builtinOrSchemaType(typ: String): Either[(QName, Boolean), QName] = {
val qName = bind.resolveQName(typ)
val isBuiltinType = Set(XF, XS)(qName.getNamespaceURI)
if (isBuiltinType)
Left(qName → (qName.getNamespaceURI == XS))
else
Right(qName)
}
findMIPs(inDoc, controlName, Type).headOption map {
case (idOpt, _, value, alertOpt) ⇒
DatatypeValidation(idOpt, builtinOrSchemaType(value), alertOpt)
} getOrElse
DefaultDataTypeValidation
}
def fromXML(
validationElem : NodeInfo,
newIds : Iterator[String],
inDoc : NodeInfo,
controlName : String
): Option[DatatypeValidation] = {
require(validationElem /@ "type" === "datatype")
val validationIdOpt = validationElem.id.trimAllToOpt orElse Some(newIds.next())
val datatype = {
val bind = findBindByName(inDoc, controlName).get
val builtinTypeStringOpt = (validationElem elemValue "builtin-type").trimAllToOpt
val builtinTypeRequired = (validationElem elemValue "builtin-type-required").trimAllToOpt contains "true"
val schemaTypeOpt = (validationElem elemValue "schema-type").trimAllToOpt
def builtinTypeQName: (QName, Boolean) = {
val builtinTypeString = builtinTypeStringOpt.get
// If a builtin type, we just have a local name
val nsURI = Model.uriForBuiltinTypeName(builtinTypeString, builtinTypeRequired)
// Namespace mapping must be in scope
val prefix = bind.nonEmptyPrefixesForURI(nsURI).sorted.head
new QName(builtinTypeString, new Namespace(prefix, nsURI)) → builtinTypeRequired
}
def schemaTypeQName: QName = {
val schemaType = schemaTypeOpt.get
// Schema type OTOH comes with a prefix if needed
val localname = parseQName(schemaType)._2
val namespace = valueNamespaceMappingScopeIfNeeded(bind, schemaType) map
{ case (prefix, uri) ⇒ new Namespace(prefix, uri) } getOrElse
Namespace.NO_NAMESPACE
new QName(localname, namespace)
}
Either.cond(schemaTypeOpt.isDefined, schemaTypeQName, builtinTypeQName)
}
Some(
DatatypeValidation(
validationIdOpt,
datatype,
AlertDetails.fromValidationXML(validationElem, validationIdOpt)
)
)
}
}
case class ConstraintValidation(
idOpt : Option[String],
level : ValidationLevel,
expression : String,
alert : Option[AlertDetails]
) extends Validation {
def stringValue = expression
def toXML(forLang: String): sx.Elem = {
val analyzed = ValidationFunction.analyzeKnownConstraint(expression)
<validation
type={analyzed map (_._1) getOrElse "formula"}
id={idOpt getOrElse ""}
level={level.name}
default-alert={alert.isEmpty.toString}>
<constraint
expression={if (analyzed.isEmpty) expression else ""}
argument={analyzed flatMap (_._2) getOrElse ""}
/>
{alertOrPlaceholder(alert, forLang)}
</validation>
}
}
object ConstraintValidation {
def fromForm(inDoc: NodeInfo, controlName: String): List[ConstraintValidation] =
findMIPs(inDoc, controlName, Constraint) map {
case (idOpt, level, value, alertOpt) ⇒
ConstraintValidation(idOpt, level, value, alertOpt)
}
def fromXML(validationElem: NodeInfo, newIds: Iterator[String]) = {
def normalizedAttOpt(attName: String) =
(validationElem child Constraint.name attValue attName headOption) flatMap trimAllToOpt
val constraintExpressionOpt = validationElem attValue "type" match {
case "formula" ⇒ normalizedAttOpt("expression")
case validationName ⇒ Some(s"xxf:$validationName(${normalizedAttOpt("argument") getOrElse ""})")
}
constraintExpressionOpt map { expr ⇒
val level = Validation.levelFromXML(validationElem)
val validationIdOpt = validationElem.id.trimAllToOpt orElse Some(newIds.next())
ConstraintValidation(
validationIdOpt,
level,
expr,
AlertDetails.fromValidationXML(validationElem, validationIdOpt)
)
}
}
}
case class AlertDetails(forValidationId: Option[String], messages: List[(String, String)], global: Boolean) {
require(! (global && forValidationId.isDefined))
require(messages.nonEmpty)
def default = forValidationId.isEmpty
// XML representation used by Form Builder
def toXML(forLang: String): sx.Elem = {
// The alert contains the message for the main language as an attribute, and the languages for the other
// languages so we can write them back.
<alert message={messages.toMap getOrElse (forLang, "")} global={global.toString}>{
messages collect {
case (lang, message) if lang != forLang ⇒
<message lang={lang} value={message}/>
}
}</alert>
}
}
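// Illustrative example, not part of the original file: for messages List("en" → "Invalid", "fr" → "Invalide"),
// forLang = "en" and global = false, toXML yields
// <alert message="Invalid" global="false"><message lang="fr" value="Invalide"/></alert>.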
object AlertDetails {
// Return supported alert details for the control
//
// - None if the alert message can't be found or if the alert/validation combination can't be handled by FB
// - alerts returned are either global (no validation/level specified) or for a single specific validation
def fromForm(inDoc: NodeInfo, controlName: String): Seq[AlertDetails] = {
val controlElem = findControlByName(inDoc, controlName).get
val alertResourcesForAllLangs = getControlResourcesWithLang(controlName, "alert")
def alertFromElement(e: NodeInfo) = {
def attValueOrNone(name: QName) = e att name map (_.stringValue) headOption
val validationAtt = attValueOrNone(VALIDATION_QNAME)
val levelAtt = attValueOrNone(LEVEL_QNAME)
val refAtt = attValueOrNone(REF_QNAME)
val isGlobal = refAtt contains OldStandardAlertRef
// Try to find the alert index from xf:alert/@ref
val alertIndexOpt =
if (isGlobal)
None
else
refAtt collect {
case OldAlertRefMatcher(`controlName`, _, index) ⇒ Option(index)
case NewAlertRefMatcher(`controlName`, _, index) ⇒ Option(index)
} map {
_ map (_.toInt - 1) getOrElse 0
}
// Try to find an existing resource for the given index if present, otherwise assume a blank value for
// the language
val alertsByLang = alertResourcesForAllLangs.to[List] map {
case (lang, alerts) ⇒ lang → (alertIndexOpt flatMap alerts.lift map (_.stringValue) getOrElse "")
}
val forValidations = gatherAlertValidations(validationAtt)
val forLevels = gatherAlertLevels(levelAtt)
// Form Builder only handles a subset of the allowed XForms mappings for now
def isDefault = forValidations.isEmpty && forLevels.isEmpty
def hasSingleValidation = forValidations.size == 1 && forLevels.isEmpty
def canHandle = isDefault || hasSingleValidation
canHandle option AlertDetails(forValidations.headOption, alertsByLang, isGlobal)
}
controlElem child "alert" flatMap alertFromElement toList
}
def fromXML(alertElem: NodeInfo, forValidationId: Option[String]) = {
val messageAtt = alertElem attValue "message"
val messagesElems = (alertElem child "message" toList) map {
message ⇒ (message attValue "lang", message attValue "value")
}
val isGlobal = (alertElem attValue "global") == "true"
AlertDetails(forValidationId, (currentLang, messageAtt) :: messagesElems, isGlobal)
}
def fromValidationXML(validationElem: NodeInfo, forValidationId: Option[String]) = {
val useDefaultAlert = validationElem /@ "default-alert" === "true"
def alertOpt = {
val alertElem = validationElem child "alert" headOption
alertElem map (AlertDetails.fromXML(_, forValidationId))
}
if (useDefaultAlert) None else alertOpt
}
}
private def findMIPs(inDoc: NodeInfo, controlName: String, mip: MIP) = {
val bind = findBindByName(inDoc, controlName).get // require the bind
val supportedAlerts = AlertDetails.fromForm(inDoc, controlName)
def findAlertForId(id: String) =
supportedAlerts find (_.forValidationId.contains(id))
def fromAttribute(a: NodeInfo) = {
val bindId = (a parent * head).id
(
None, // no id because we don't want that attribute to roundtrip
ErrorLevel,
a.stringValue,
findAlertForId(bindId)
)
}
def fromElement(e: NodeInfo) = {
val id = e.id
(
id.trimAllToOpt,
(e attValue LEVEL_QNAME).trimAllToOpt map LevelByName getOrElse ErrorLevel,
if (mip == Type) e.stringValue else e attValue VALUE_QNAME,
findAlertForId(id)
)
}
// Gather all validations (in fb:* except for type)
def attributeValidations = mipAtts (bind, mip) map fromAttribute
def elementValidations = mipElems(bind, mip) map fromElement
attributeValidations ++ elementValidations toList
}
private def mipAtts (bind: NodeInfo, mip: MIP) = bind /@ mipToFBMIPQNames(mip)._1
private def mipElems(bind: NodeInfo, mip: MIP) = bind / mipToFBMIPQNames(mip)._2
private def alertOrPlaceholder(alert: Option[AlertDetails], forLang: String) =
alert orElse Some(AlertDetails(None, List(currentLang → ""), global = false)) map (_.toXML(forLang)) get
}
|
joansmith/orbeon-forms
|
src/main/scala/org/orbeon/oxf/fb/AlertsAndConstraintsOps.scala
|
Scala
|
lgpl-2.1
| 23,756 |
package org.emailscript.dkim
import java.security.spec.X509EncodedKeySpec
import java.security.{KeyFactory, PublicKey}
import java.util.Base64
import org.emailscript.helpers.{DnsHelper, LoggerFactory}
object DkimDnsLookup {
val logger = LoggerFactory.getLogger(getClass)
val DKIM1 = "DKIM1"
val RSA = "rsa"
def removeWhiteSpace(text: String) = text.replaceAll("""[ \t\n\r"]""", "")
}
class DkimDnsLookup(helper: DnsHelper) {
import DkimDnsLookup._
// Find the first valid DKIM TXT record and build a public key from it
def getPublicKey(dnsHost: String): PublicKey = {
val records = helper.getDnsRecords(dnsHost, "TXT")
if (records.length == 0)
throw new Exception(s"No TXT records found in DNS entry for : $dnsHost")
val maps = records.map{record: String => DkimSignature.mapFields(removeWhiteSpace(record))}
val mapOption = maps.find(isValid(_))
if (mapOption.isEmpty){
val recordText = records.mkString(",")
throw new Exception(s"No valid TXT record found for $dnsHost, records: $recordText")
}
val fieldMap = mapOption.get
generatePublicKey(fieldMap.get("p").get)
}
private def isValid(map: Map[String, String]): Boolean = {
if (map.getOrElse("v", DKIM1) != DKIM1)
false
else if (!map.contains("p"))
false
else
true
}
private def generatePublicKey(encodedPublicKey: String): PublicKey = {
logger.debug(s"encoded key: $encodedPublicKey")
val decodedKey = Base64.getDecoder().decode(encodedPublicKey)
val keyFactory = KeyFactory.getInstance("RSA")
keyFactory.generatePublic(new X509EncodedKeySpec(decodedKey))
}
}
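// Hypothetical usage sketch, not part of the original file: the DKIM host name below is made up,
// and the DnsHelper instance is assumed to be provided by the surrounding code.
object DkimDnsLookupSketch {
  def fetchKey(helper: DnsHelper): PublicKey =
    new DkimDnsLookup(helper).getPublicKey("selector1._domainkey.example.com")
}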
|
OdysseusLevy/emailscript
|
src/main/scala/org/emailscript/dkim/DkimDnsLookup.scala
|
Scala
|
lgpl-3.0
| 1,692 |
/* __ *\
** ________ ___ / / ___ __ ____ Scala.js IR **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2014, LAMP/EPFL **
** __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \ http://scala-js.org/ **
** /____/\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\* */
package org.scalajs.core.ir
import scala.annotation.switch
import java.io._
import java.net.URI
import scala.collection.mutable
import Position._
import Trees._
import Types._
import Tags._
import Utils.JumpBackByteArrayOutputStream
object Serializers {
def serialize(stream: OutputStream, tree: Tree): Unit = {
new Serializer().serialize(stream, tree)
}
def deserialize(stream: InputStream, version: String): Tree = {
new Deserializer(stream, version).deserialize()
}
// true for easier debugging (not for "production", it adds 8 bytes per node)
private final val UseDebugMagic = false
private final val DebugMagic = 0x3fa8ef84
private final val PosDebugMagic = 0x65f0ec32
private object PositionFormat {
/* Positions are serialized incrementally as diffs wrt the last position.
*
* Formats are (the first byte is decomposed in bits):
*
* 1st byte | next bytes | description
* -----------------------------------------
* ccccccc0 | | Column diff (7-bit signed)
* llllll01 | CC | Line diff (6-bit signed), column (8-bit unsigned)
* ____0011 | LL LL CC | Line diff (16-bit signed), column (8-bit unsigned)
* ____0111 | 12 bytes | File index, line, column (all 32-bit signed)
* 11111111 | | NoPosition (is not compared/stored in last position)
*
* Underscores are irrelevant and must be set to 0.
*/
final val Format1Mask = 0x01
final val Format1MaskValue = 0x00
final val Format1Shift = 1
final val Format2Mask = 0x03
final val Format2MaskValue = 0x01
final val Format2Shift = 2
final val Format3Mask = 0x0f
final val Format3MaskValue = 0x03
final val FormatFullMask = 0x0f
final val FormatFullMaskValue = 0x7
final val FormatNoPositionValue = -1
}
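// Worked example (illustrative, not in the original file): with lastPosition at line 10, column 5,
// a new position at line 10, column 8 has columnDiff = 3, so format 1 applies and the single byte
// written is (3 << Format1Shift) | Format1MaskValue = 0x06. A jump to line 12, column 7 instead
// uses format 2: (2 << Format2Shift) | Format2MaskValue = 0x09, followed by the column byte 0x07.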
private final class Serializer {
private[this] val bufferUnderlying = new JumpBackByteArrayOutputStream
private[this] val buffer = new DataOutputStream(bufferUnderlying)
private[this] val files = mutable.ListBuffer.empty[URI]
private[this] val fileIndexMap = mutable.Map.empty[URI, Int]
private def fileToIndex(file: URI): Int =
fileIndexMap.getOrElseUpdate(file, (files += file).size - 1)
private[this] val strings = mutable.ListBuffer.empty[String]
private[this] val stringIndexMap = mutable.Map.empty[String, Int]
private def stringToIndex(str: String): Int =
stringIndexMap.getOrElseUpdate(str, (strings += str).size - 1)
private[this] var lastPosition: Position = Position.NoPosition
def serialize(stream: OutputStream, tree: Tree): Unit = {
// Write tree to buffer and record files and strings
writeTree(tree)
val s = new DataOutputStream(stream)
// Emit the files
s.writeInt(files.size)
files.foreach(f => s.writeUTF(f.toString))
// Emit the strings
s.writeInt(strings.size)
strings.foreach(s.writeUTF)
// Paste the buffer
bufferUnderlying.writeTo(s)
s.flush()
}
def writeTree(tree: Tree): Unit = {
import buffer._
writePosition(tree.pos)
tree match {
case EmptyTree =>
writeByte(TagEmptyTree)
case VarDef(ident, vtpe, mutable, rhs) =>
writeByte(TagVarDef)
writeIdent(ident); writeType(vtpe); writeBoolean(mutable); writeTree(rhs)
case ParamDef(ident, ptpe, mutable, rest) =>
writeByte(TagParamDef)
writeIdent(ident); writeType(ptpe); writeBoolean(mutable); writeBoolean(rest)
case Skip() =>
writeByte(TagSkip)
case Block(stats) =>
writeByte(TagBlock)
writeTrees(stats)
case Labeled(label, tpe, body) =>
writeByte(TagLabeled)
writeIdent(label); writeType(tpe); writeTree(body)
case Assign(lhs, rhs) =>
writeByte(TagAssign)
writeTree(lhs); writeTree(rhs)
case Return(expr, label) =>
writeByte(TagReturn)
writeTree(expr); writeOptIdent(label)
case If(cond, thenp, elsep) =>
writeByte(TagIf)
writeTree(cond); writeTree(thenp); writeTree(elsep)
writeType(tree.tpe)
case While(cond, body, label) =>
writeByte(TagWhile)
writeTree(cond); writeTree(body); writeOptIdent(label)
case DoWhile(body, cond, label) =>
writeByte(TagDoWhile)
writeTree(body); writeTree(cond); writeOptIdent(label)
case Try(block, errVar, handler, finalizer) =>
writeByte(TagTry)
writeTree(block); writeIdent(errVar); writeTree(handler); writeTree(finalizer)
writeType(tree.tpe)
case Throw(expr) =>
writeByte(TagThrow)
writeTree(expr)
case Continue(label) =>
writeByte(TagContinue)
writeOptIdent(label)
case Match(selector, cases, default) =>
writeByte(TagMatch)
writeTree(selector)
writeInt(cases.size)
cases foreach { caze =>
writeTrees(caze._1); writeTree(caze._2)
}
writeTree(default)
writeType(tree.tpe)
case Debugger() =>
writeByte(TagDebugger)
case New(cls, ctor, args) =>
writeByte(TagNew)
writeClassType(cls); writeIdent(ctor); writeTrees(args)
case LoadModule(cls) =>
writeByte(TagLoadModule)
writeClassType(cls)
case StoreModule(cls, value) =>
writeByte(TagStoreModule)
writeClassType(cls); writeTree(value)
case Select(qualifier, item) =>
writeByte(TagSelect)
writeTree(qualifier); writeIdent(item)
writeType(tree.tpe)
case Apply(receiver, method, args) =>
writeByte(TagApply)
writeTree(receiver); writeIdent(method); writeTrees(args)
writeType(tree.tpe)
case ApplyStatically(receiver, cls, method, args) =>
writeByte(TagApplyStatically)
writeTree(receiver); writeClassType(cls); writeIdent(method); writeTrees(args)
writeType(tree.tpe)
case ApplyStatic(cls, method, args) =>
writeByte(TagApplyStatic)
writeClassType(cls); writeIdent(method); writeTrees(args)
writeType(tree.tpe)
case UnaryOp(op, lhs) =>
writeByte(TagUnaryOp)
writeByte(op); writeTree(lhs)
case BinaryOp(op, lhs, rhs) =>
writeByte(TagBinaryOp)
writeByte(op); writeTree(lhs); writeTree(rhs)
case NewArray(tpe, lengths) =>
writeByte(TagNewArray)
writeArrayType(tpe); writeTrees(lengths)
case ArrayValue(tpe, elems) =>
writeByte(TagArrayValue)
writeArrayType(tpe); writeTrees(elems)
case ArrayLength(array) =>
writeByte(TagArrayLength)
writeTree(array)
case ArraySelect(array, index) =>
writeByte(TagArraySelect)
writeTree(array); writeTree(index)
writeType(tree.tpe)
case RecordValue(tpe, elems) =>
writeByte(TagRecordValue)
writeType(tpe); writeTrees(elems)
case IsInstanceOf(expr, cls) =>
writeByte(TagIsInstanceOf)
writeTree(expr); writeReferenceType(cls)
case AsInstanceOf(expr, cls) =>
writeByte(TagAsInstanceOf)
writeTree(expr); writeReferenceType(cls)
case Unbox(expr, charCode) =>
writeByte(TagUnbox)
writeTree(expr); writeByte(charCode.toByte)
case GetClass(expr) =>
writeByte(TagGetClass)
writeTree(expr)
case CallHelper(helper, args) =>
writeByte(TagCallHelper)
writeString(helper); writeTrees(args)
writeType(tree.tpe)
case JSNew(ctor, args) =>
writeByte(TagJSNew)
writeTree(ctor); writeTrees(args)
case JSDotSelect(qualifier, item) =>
writeByte(TagJSDotSelect)
writeTree(qualifier); writeIdent(item)
case JSBracketSelect(qualifier, item) =>
writeByte(TagJSBracketSelect)
writeTree(qualifier); writeTree(item)
case JSFunctionApply(fun, args) =>
writeByte(TagJSFunctionApply)
writeTree(fun); writeTrees(args)
case JSDotMethodApply(receiver, method, args) =>
writeByte(TagJSDotMethodApply)
writeTree(receiver); writeIdent(method); writeTrees(args)
case JSBracketMethodApply(receiver, method, args) =>
writeByte(TagJSBracketMethodApply)
writeTree(receiver); writeTree(method); writeTrees(args)
case JSSpread(items) =>
writeByte(TagJSSpread)
writeTree(items)
case JSDelete(prop) =>
writeByte(TagJSDelete)
writeTree(prop)
case JSUnaryOp(op, lhs) =>
writeByte(TagJSUnaryOp)
writeInt(op); writeTree(lhs)
case JSBinaryOp(op, lhs, rhs) =>
writeByte(TagJSBinaryOp)
writeInt(op); writeTree(lhs); writeTree(rhs)
case JSArrayConstr(items) =>
writeByte(TagJSArrayConstr)
writeTrees(items)
case JSObjectConstr(fields) =>
writeByte(TagJSObjectConstr)
writeInt(fields.size)
fields foreach { field =>
writePropertyName(field._1); writeTree(field._2)
}
case JSEnvInfo() =>
writeByte(TagJSEnvInfo)
// Literals
case Undefined() =>
writeByte(TagUndefined)
case UndefinedParam() =>
writeByte(TagUndefinedParam)
writeType(tree.tpe)
case Null() =>
writeByte(TagNull)
case BooleanLiteral(value) =>
writeByte(TagBooleanLiteral)
writeBoolean(value)
case IntLiteral(value) =>
writeByte(TagIntLiteral)
writeInt(value)
case LongLiteral(value) =>
writeByte(TagLongLiteral)
writeLong(value)
case FloatLiteral(value) =>
writeByte(TagFloatLiteral)
writeFloat(value)
case DoubleLiteral(value) =>
writeByte(TagDoubleLiteral)
writeDouble(value)
case StringLiteral(value) =>
writeByte(TagStringLiteral)
writeString(value)
case ClassOf(cls) =>
writeByte(TagClassOf)
writeReferenceType(cls)
case VarRef(ident) =>
writeByte(TagVarRef)
writeIdent(ident)
writeType(tree.tpe)
case This() =>
writeByte(TagThis)
writeType(tree.tpe)
case Closure(captureParams, params, body, captureValues) =>
writeByte(TagClosure)
writeTrees(captureParams)
writeTrees(params)
writeTree(body)
writeTrees(captureValues)
case tree: ClassDef =>
val ClassDef(name, kind, superClass, parents, jsName, defs) = tree
writeByte(TagClassDef)
writeIdent(name)
writeByte(ClassKind.toByte(kind))
writeOptIdent(superClass)
writeIdents(parents)
writeString(jsName.getOrElse(""))
writeTrees(defs)
writeInt(tree.optimizerHints.bits)
case FieldDef(ident, ftpe, mutable) =>
writeByte(TagFieldDef)
writeIdent(ident); writeType(ftpe); writeBoolean(mutable)
case methodDef: MethodDef =>
val MethodDef(static, name, args, resultType, body) = methodDef
writeByte(TagMethodDef)
writeOptHash(methodDef.hash)
// Prepare for back-jump and write dummy length
bufferUnderlying.markJump()
writeInt(-1)
// Write out method def
writeBoolean(static); writePropertyName(name)
writeTrees(args); writeType(resultType); writeTree(body)
writeInt(methodDef.optimizerHints.bits)
// Jump back and write true length
val length = bufferUnderlying.jumpBack()
writeInt(length)
bufferUnderlying.continue()
case PropertyDef(name, getter, arg, setter) =>
writeByte(TagPropertyDef)
writePropertyName(name); writeTree(getter); writeTree(arg); writeTree(setter)
case ConstructorExportDef(fullName, args, body) =>
writeByte(TagConstructorExportDef)
writeString(fullName); writeTrees(args); writeTree(body)
case ModuleExportDef(fullName) =>
writeByte(TagModuleExportDef)
writeString(fullName)
}
if (UseDebugMagic)
writeInt(DebugMagic)
}
def writeTrees(trees: List[Tree]): Unit = {
buffer.writeInt(trees.size)
trees.foreach(writeTree)
}
def writeIdent(ident: Ident): Unit = {
writePosition(ident.pos)
writeString(ident.name); writeString(ident.originalName.getOrElse(""))
}
def writeIdents(idents: List[Ident]): Unit = {
buffer.writeInt(idents.size)
idents.foreach(writeIdent)
}
def writeOptIdent(optIdent: Option[Ident]): Unit = {
buffer.writeBoolean(optIdent.isDefined)
optIdent.foreach(writeIdent)
}
def writeType(tpe: Type): Unit = {
tpe match {
case AnyType => buffer.write(TagAnyType)
case NothingType => buffer.write(TagNothingType)
case UndefType => buffer.write(TagUndefType)
case BooleanType => buffer.write(TagBooleanType)
case IntType => buffer.write(TagIntType)
case LongType => buffer.write(TagLongType)
case FloatType => buffer.write(TagFloatType)
case DoubleType => buffer.write(TagDoubleType)
case StringType => buffer.write(TagStringType)
case NullType => buffer.write(TagNullType)
case NoType => buffer.write(TagNoType)
case tpe: ClassType =>
buffer.write(TagClassType)
writeClassType(tpe)
case tpe: ArrayType =>
buffer.write(TagArrayType)
writeArrayType(tpe)
case RecordType(fields) =>
buffer.write(TagRecordType)
buffer.writeInt(fields.size)
for (RecordType.Field(name, originalName, tpe, mutable) <- fields) {
writeString(name)
writeString(originalName.getOrElse(""))
writeType(tpe)
buffer.writeBoolean(mutable)
}
}
}
def writeClassType(tpe: ClassType): Unit =
writeString(tpe.className)
def writeArrayType(tpe: ArrayType): Unit = {
writeString(tpe.baseClassName)
buffer.writeInt(tpe.dimensions)
}
def writeReferenceType(tpe: ReferenceType): Unit =
writeType(tpe)
def writePropertyName(name: PropertyName): Unit = {
name match {
case name: Ident => buffer.writeBoolean(true); writeIdent(name)
case name: StringLiteral => buffer.writeBoolean(false); writeTree(name)
}
}
def writePosition(pos: Position): Unit = {
import buffer._
import PositionFormat._
def writeFull(): Unit = {
writeByte(FormatFullMaskValue)
writeInt(fileToIndex(pos.source))
writeInt(pos.line)
writeInt(pos.column)
}
if (pos == Position.NoPosition) {
writeByte(FormatNoPositionValue)
} else if (lastPosition == Position.NoPosition ||
pos.source != lastPosition.source) {
writeFull()
lastPosition = pos
} else {
val line = pos.line
val column = pos.column
val lineDiff = line - lastPosition.line
val columnDiff = column - lastPosition.column
val columnIsByte = column >= 0 && column < 256
if (lineDiff == 0 && columnDiff >= -64 && columnDiff < 64) {
writeByte((columnDiff << Format1Shift) | Format1MaskValue)
} else if (lineDiff >= -32 && lineDiff < 32 && columnIsByte) {
writeByte((lineDiff << Format2Shift) | Format2MaskValue)
writeByte(column)
} else if (lineDiff >= Short.MinValue && lineDiff <= Short.MaxValue && columnIsByte) {
writeByte(Format3MaskValue)
writeShort(lineDiff)
writeByte(column)
} else {
writeFull()
}
lastPosition = pos
}
if (UseDebugMagic)
writeInt(PosDebugMagic)
}
def writeOptHash(optHash: Option[TreeHash]): Unit = {
buffer.writeBoolean(optHash.isDefined)
for (hash <- optHash) {
buffer.write(hash.treeHash)
buffer.write(hash.posHash)
}
}
def writeString(s: String): Unit =
buffer.writeInt(stringToIndex(s))
}
private final class Deserializer(stream: InputStream, sourceVersion: String) {
private[this] val useHacks060 = sourceVersion == "0.6.0"
private[this] val input = new DataInputStream(stream)
private[this] val files =
Array.fill(input.readInt())(new URI(input.readUTF()))
private[this] val strings =
Array.fill(input.readInt())(input.readUTF())
private[this] var lastPosition: Position = Position.NoPosition
private[this] var foundArguments: Boolean = false
def deserialize(): Tree = {
readTree()
}
def readTree(): Tree = {
import input._
implicit val pos = readPosition()
val tag = readByte()
val result = (tag: @switch) match {
case TagEmptyTree => EmptyTree
case TagVarDef => VarDef(readIdent(), readType(), readBoolean(), readTree())
case TagParamDef =>
ParamDef(readIdent(), readType(), readBoolean(),
rest = if (useHacks060) false else readBoolean())
case TagSkip => Skip()
case TagBlock => Block(readTrees())
case TagLabeled => Labeled(readIdent(), readType(), readTree())
case TagAssign => Assign(readTree(), readTree())
case TagReturn => Return(readTree(), readOptIdent())
case TagIf => If(readTree(), readTree(), readTree())(readType())
case TagWhile => While(readTree(), readTree(), readOptIdent())
case TagDoWhile => DoWhile(readTree(), readTree(), readOptIdent())
case TagTry => Try(readTree(), readIdent(), readTree(), readTree())(readType())
case TagThrow => Throw(readTree())
case TagContinue => Continue(readOptIdent())
case TagMatch =>
Match(readTree(), List.fill(readInt()) {
(readTrees().map(_.asInstanceOf[Literal]), readTree())
}, readTree())(readType())
case TagDebugger => Debugger()
case TagNew => New(readClassType(), readIdent(), readTrees())
case TagLoadModule => LoadModule(readClassType())
case TagStoreModule => StoreModule(readClassType(), readTree())
case TagSelect => Select(readTree(), readIdent())(readType())
case TagApply => Apply(readTree(), readIdent(), readTrees())(readType())
case TagApplyStatically => ApplyStatically(readTree(), readClassType(), readIdent(), readTrees())(readType())
case TagApplyStatic => ApplyStatic(readClassType(), readIdent(), readTrees())(readType())
case TagUnaryOp => UnaryOp(readByte(), readTree())
case TagBinaryOp => BinaryOp(readByte(), readTree(), readTree())
case TagNewArray => NewArray(readArrayType(), readTrees())
case TagArrayValue => ArrayValue(readArrayType(), readTrees())
case TagArrayLength => ArrayLength(readTree())
case TagArraySelect => ArraySelect(readTree(), readTree())(readType())
case TagRecordValue => RecordValue(readType().asInstanceOf[RecordType], readTrees())
case TagIsInstanceOf => IsInstanceOf(readTree(), readReferenceType())
case TagAsInstanceOf => AsInstanceOf(readTree(), readReferenceType())
case TagUnbox => Unbox(readTree(), readByte().toChar)
case TagGetClass => GetClass(readTree())
case TagCallHelper => CallHelper(readString(), readTrees())(readType())
case TagJSNew => JSNew(readTree(), readTrees())
case TagJSDotSelect => JSDotSelect(readTree(), readIdent())
case TagJSBracketSelect => JSBracketSelect(readTree(), readTree())
case TagJSFunctionApply => JSFunctionApply(readTree(), readTrees())
case TagJSDotMethodApply => JSDotMethodApply(readTree(), readIdent(), readTrees())
case TagJSBracketMethodApply => JSBracketMethodApply(readTree(), readTree(), readTrees())
case TagJSSpread => JSSpread(readTree())
case TagJSDelete => JSDelete(readTree())
case TagJSUnaryOp => JSUnaryOp(readInt(), readTree())
case TagJSBinaryOp => JSBinaryOp(readInt(), readTree(), readTree())
case TagJSArrayConstr => JSArrayConstr(readTrees())
case TagJSObjectConstr =>
JSObjectConstr(List.fill(readInt())((readPropertyName(), readTree())))
case TagJSEnvInfo => JSEnvInfo()
case TagUndefined => Undefined()
case TagUndefinedParam => UndefinedParam()(readType())
case TagNull => Null()
case TagBooleanLiteral => BooleanLiteral(readBoolean())
case TagIntLiteral => IntLiteral(readInt())
case TagLongLiteral => LongLiteral(readLong())
case TagFloatLiteral => FloatLiteral(readFloat())
case TagDoubleLiteral => DoubleLiteral(readDouble())
case TagStringLiteral => StringLiteral(readString())
case TagClassOf => ClassOf(readReferenceType())
case TagVarRef =>
val result = VarRef(readIdent())(readType())
if (useHacks060 && result.ident.name == "arguments")
foundArguments = true
result
case TagThis => This()(readType())
case TagClosure =>
Closure(readParamDefs(), readParamDefs(), readTree(), readTrees())
case TagClassDef =>
val name = readIdent()
val kind = ClassKind.fromByte(readByte())
val superClass = readOptIdent()
val parents = readIdents()
val jsName = Some(readString()).filter(_ != "")
val defs = readTrees()
val optimizerHints = new OptimizerHints(readInt())
ClassDef(name, kind, superClass, parents, jsName, defs)(optimizerHints)
case TagFieldDef =>
FieldDef(readIdent(), readType(), readBoolean())
case TagMethodDef =>
val optHash = readOptHash()
// read and discard the length
val len = readInt()
assert(len >= 0)
val result = MethodDef(readBoolean(), readPropertyName(),
readParamDefs(), readType(), readTree())(
new OptimizerHints(readInt()), optHash)
if (foundArguments) {
foundArguments = false
new RewriteArgumentsTransformer().transformMethodDef(result)
} else {
result
}
case TagPropertyDef =>
PropertyDef(readPropertyName(), readTree(),
readTree().asInstanceOf[ParamDef], readTree())
case TagConstructorExportDef =>
val result = ConstructorExportDef(readString(), readParamDefs(), readTree())
if (foundArguments) {
foundArguments = false
new RewriteArgumentsTransformer().transformConstructorExportDef(result)
} else {
result
}
case TagModuleExportDef =>
ModuleExportDef(readString())
}
if (UseDebugMagic) {
val magic = readInt()
assert(magic == DebugMagic,
s"Bad magic after reading a ${result.getClass}!")
}
result
}
def readTrees(): List[Tree] =
List.fill(input.readInt())(readTree())
def readParamDefs(): List[ParamDef] =
readTrees().map(_.asInstanceOf[ParamDef])
def readIdent(): Ident = {
implicit val pos = readPosition()
val name = readString()
val originalName = readString()
Ident(name, if (originalName.isEmpty) None else Some(originalName))
}
def readIdents(): List[Ident] =
List.fill(input.readInt())(readIdent())
def readOptIdent(): Option[Ident] = {
if (input.readBoolean()) Some(readIdent())
else None
}
def readType(): Type = {
val tag = input.readByte()
(tag: @switch) match {
case TagAnyType => AnyType
case TagNothingType => NothingType
case TagUndefType => UndefType
case TagBooleanType => BooleanType
case TagIntType => IntType
case TagLongType => LongType
case TagFloatType => FloatType
case TagDoubleType => DoubleType
case TagStringType => StringType
case TagNullType => NullType
case TagNoType => NoType
case TagClassType => readClassType()
case TagArrayType => readArrayType()
case TagRecordType =>
RecordType(List.fill(input.readInt()) {
val name = readString()
val originalName = readString()
val tpe = readType()
val mutable = input.readBoolean()
RecordType.Field(name,
if (originalName.isEmpty) None else Some(originalName),
tpe, mutable)
})
}
}
def readClassType(): ClassType =
ClassType(readString())
def readArrayType(): ArrayType =
ArrayType(readString(), input.readInt())
def readReferenceType(): ReferenceType =
readType().asInstanceOf[ReferenceType]
def readPropertyName(): PropertyName = {
if (input.readBoolean()) readIdent()
else readTree().asInstanceOf[StringLiteral]
}
def readPosition(): Position = {
import input._
import PositionFormat._
val first = readByte()
val result = if (first == FormatNoPositionValue) {
Position.NoPosition
} else {
val result = if ((first & FormatFullMask) == FormatFullMaskValue) {
val file = files(readInt())
val line = readInt()
val column = readInt()
Position(file, line, column)
} else {
assert(lastPosition != NoPosition,
"Position format error: first position must be full")
if ((first & Format1Mask) == Format1MaskValue) {
val columnDiff = first >> Format1Shift
Position(lastPosition.source, lastPosition.line,
lastPosition.column + columnDiff)
} else if ((first & Format2Mask) == Format2MaskValue) {
val lineDiff = first >> Format2Shift
val column = readByte() & 0xff // unsigned
Position(lastPosition.source,
lastPosition.line + lineDiff, column)
} else {
assert((first & Format3Mask) == Format3MaskValue,
s"Position format error: first byte $first does not match any format")
val lineDiff = readShort()
val column = readByte() & 0xff // unsigned
Position(lastPosition.source,
lastPosition.line + lineDiff, column)
}
}
lastPosition = result
result
}
if (UseDebugMagic) {
val magic = readInt()
assert(magic == PosDebugMagic,
s"Bad magic after reading position with first byte $first")
}
result
}
def readOptHash(): Option[TreeHash] = {
if (input.readBoolean()) {
val treeHash = new Array[Byte](20)
val posHash = new Array[Byte](20)
input.readFully(treeHash)
input.readFully(posHash)
Some(new TreeHash(treeHash, posHash))
} else None
}
def readString(): String = {
strings(input.readInt())
}
}
private class RewriteArgumentsTransformer extends Transformers.Transformer {
import RewriteArgumentsTransformer._
private[this] var paramToIndex: Map[String, Int] = _
def transformMethodDef(tree: MethodDef): MethodDef = {
/* Ideally, we would re-hash the new MethodDef here, but we cannot do
     * that because it prevents the JS version of the tools from linking.
* Since the hashes of exported methods are not used by our pipeline
* anyway, we simply put None.
*/
val MethodDef(static, name, args, resultType, body) = tree
setupParamToIndex(args)
MethodDef(static, name, List(argumentsParamDef(tree.pos)),
resultType, transform(body, isStat = resultType == NoType))(
tree.optimizerHints, None)(tree.pos)
}
def transformConstructorExportDef(
tree: ConstructorExportDef): ConstructorExportDef = {
val ConstructorExportDef(name, args, body) = tree
setupParamToIndex(args)
ConstructorExportDef(name, List(argumentsParamDef(tree.pos)),
transformStat(body))(tree.pos)
}
private def setupParamToIndex(params: List[ParamDef]): Unit =
paramToIndex = params.map(_.name.name).zipWithIndex.toMap
private def argumentsParamDef(implicit pos: Position): ParamDef =
ParamDef(Ident(ArgumentsName), AnyType, mutable = false, rest = true)
private def argumentsRef(implicit pos: Position): VarRef =
VarRef(Ident(ArgumentsName))(AnyType)
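  // References to the original parameters are rewritten into indexed selections on the
  // synthetic rest parameter (`$arguments[i]`), and bare `arguments` references are
  // redirected to that same rest parameter.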
override def transform(tree: Tree, isStat: Boolean): Tree = tree match {
case VarRef(Ident(name, origName)) =>
implicit val pos = tree.pos
paramToIndex.get(name).fold {
if (name == "arguments") argumentsRef
else tree
} { paramIndex =>
JSBracketSelect(argumentsRef, IntLiteral(paramIndex))
}
case _ =>
super.transform(tree, isStat)
}
}
private object RewriteArgumentsTransformer {
private final val ArgumentsName = "$arguments"
}
}
|
matthughes/scala-js
|
ir/src/main/scala/org/scalajs/core/ir/Serializers.scala
|
Scala
|
bsd-3-clause
| 30,408 |
package org.vitrivr.adampro.storage.engine
import java.io.Serializable
import org.apache.spark.annotation.Experimental
import org.apache.spark.sql.{DataFrame, SaveMode}
import org.vitrivr.adampro.data.datatypes.AttributeTypes
import org.vitrivr.adampro.data.entity.AttributeDefinition
import org.vitrivr.adampro.process.SharedComponentContext
import org.vitrivr.adampro.query.query.Predicate
import scala.util.{Failure, Success, Try}
/**
* ADAMpro
*
* Ivan Giangreco
* August 2017
*/
@Experimental class CompoundEngine(fullAccessHandlerName : String, randomAccessHandlerName : String)(@transient override implicit val ac: SharedComponentContext) extends Engine()(ac) with Serializable {
override val name: String = "compound"
override def supports = Seq(AttributeTypes.AUTOTYPE, AttributeTypes.INTTYPE, AttributeTypes.LONGTYPE, AttributeTypes.FLOATTYPE, AttributeTypes.DOUBLETYPE, AttributeTypes.STRINGTYPE, AttributeTypes.TEXTTYPE, AttributeTypes.BOOLEANTYPE, AttributeTypes.GEOGRAPHYTYPE, AttributeTypes.GEOMETRYTYPE, AttributeTypes.VECTORTYPE, AttributeTypes.SPARSEVECTORTYPE)
override def specializes: Seq[AttributeTypes.AttributeType] = Seq()
override val repartitionable: Boolean = true
//set engines lazy!
private lazy val fullAccessEngine = ac.storageManager.get(fullAccessHandlerName).get.engine
private lazy val randomAccessEngine = ac.storageManager.get(randomAccessHandlerName).get.engine
/**
*
* @param props
*/
def this(props: Map[String, String])(implicit ac: SharedComponentContext) {
this(props.get("fullAccessHandler").get, props.get("randomAccessHandler").get)(ac)
}
/**
* Create the entity.
*
* @param storename adapted entityname to store feature to
* @param attributes attributes of the entity (w.r.t. handler)
* @param params creation parameters
* @return options to store
*/
override def create(storename: String, attributes: Seq[AttributeDefinition], params: Map[String, String])(implicit ac: SharedComponentContext): Try[Map[String, String]] = {
try{
fullAccessEngine.create(storename, attributes, params)
randomAccessEngine.create(storename, attributes, params)
} catch {
case e : Exception => Failure(e)
}
}
/**
* Check if entity exists.
*
* @param storename adapted entityname to store feature to
* @return
*/
override def exists(storename: String)(implicit ac: SharedComponentContext): Try[Boolean] = {
try {
Success(fullAccessEngine.exists(storename).get || randomAccessEngine.exists(storename).get)
} catch {
case e : Exception => Failure(e)
}
}
/**
* Read entity.
*
* @param storename adapted entityname to store feature to
* @param attributes the attributes to read
* @param predicates filtering predicates (only applied if possible)
* @param params reading parameters
* @return
*/
override def read(storename: String, attributes: Seq[AttributeDefinition], predicates: Seq[Predicate], params: Map[String, String])(implicit ac: SharedComponentContext): Try[DataFrame] = {
if (predicates.nonEmpty) {
randomAccessEngine.read(storename, attributes, predicates, params)
} else {
fullAccessEngine.read(storename, attributes, predicates, params)
}
}
/**
* Write entity.
*
* @param storename adapted entityname to store feature to
* @param df data
* @param attributes attributes to store
* @param mode save mode (append, overwrite, ...)
* @param params writing parameters
* @return new options to store
*/
override def write(storename: String, df: DataFrame, attributes: Seq[AttributeDefinition], mode: SaveMode, params: Map[String, String])(implicit ac: SharedComponentContext): Try[Map[String, String]] = {
try{
randomAccessEngine.write(storename, df, attributes, mode, params)
fullAccessEngine.write(storename, df, attributes, mode, params)
} catch {
case e : Exception => Failure(e)
}
}
/**
* Drop the entity.
*
* @param storename adapted entityname to store feature to
* @return
*/
override def drop(storename: String)(implicit ac: SharedComponentContext): Try[Void] = {
try{
randomAccessEngine.drop(storename)
fullAccessEngine.drop(storename)
Success(null)
} catch {
case e : Exception => Failure(e)
}
}
}
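// A minimal usage sketch (the handler names "parquet" and "postgres" are illustrative and not
// part of this file; an implicit SharedComponentContext `ac` is assumed to be in scope):
//
//   val engine = new CompoundEngine("parquet", "postgres")(ac)
//   engine.write(storename, df, attributes, SaveMode.Append, Map.empty)   // writes to both engines
//   engine.read(storename, attributes, predicates = Seq.empty, params = Map.empty)
//   // an empty predicate list routes the read to the full-access engine;
//   // non-empty predicates route it to the random-access engine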
|
dbisUnibas/ADAMpro
|
src/main/scala/org/vitrivr/adampro/storage/engine/CompoundEngine.scala
|
Scala
|
mit
| 4,434 |
// Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
package org.pantsbuild.zinc.logging
import java.io.{ File, PrintWriter, StringWriter }
import com.google.common.base.Charsets
import com.google.common.io.Files
import sbt.{ ConsoleOut, Level }
import org.junit.runner.RunWith
import org.scalatest.WordSpec
import org.scalatest.junit.JUnitRunner
import org.scalatest.matchers.MustMatchers
@RunWith(classOf[JUnitRunner])
class LoggersSpec extends WordSpec with MustMatchers {
"Loggers" should {
"be compound" in {
// create a compound logger
val stdout = new StringWriter()
val captureFile = File.createTempFile("loggers", "spec")
val log =
Loggers.create(
Level.Debug,
false,
Seq(),
ConsoleOut.printWriterOut(new PrintWriter(stdout)),
Some(captureFile)
)
// log something, and confirm it's captured in both locations
val msg = "this is a log message!"
log.debug(msg)
stdout.toString must include(msg)
Files.toString(captureFile, Charsets.UTF_8) must include(msg)
}
}
}
|
laurentgo/pants
|
tests/scala/org/pantsbuild/zinc/logging/LoggersSpec.scala
|
Scala
|
apache-2.0
| 1,189 |
class Ennemi(var nomo: String) extends Personnage(nomo, 2, 10) {
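  // `battre` attacks the main character: the infix call below is equivalent to perso.coup(puissance).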
def battre(perso: PersonnagePrincipal) {
perso coup puissance
}
}
|
r0mdau/mario-chance-game
|
src/Ennemi.scala
|
Scala
|
apache-2.0
| 142 |
/**
* Copyright 2016 Matthew Farmer
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package frmr.scyig.matching
import frmr.scyig.matching.models._
import java.util.UUID
import scala.math._
import scala.util.Random
import net.liftweb.common._
/**
* Determines a list of participants that would be good to fill the next open
* slot in the partial match.
*/
trait ParticipantSuggester extends Loggable {
def suggestParticipants(partialMatch: Option[PartialRoundMatch]): Seq[Participant]
def withParticipants(participants: Seq[Participant]): ParticipantSuggester
def withoutParticipant(participantId: UUID): ParticipantSuggester
val participants: Seq[Participant]
lazy val teams: Seq[CompetingTeam] = participants.collect {
case team: CompetingTeam => team
}
lazy val presidingJudges: Seq[Judge] = participants.collect {
case judge: Judge if judge.isPresiding => judge
}
lazy val scoringJudges: Seq[Judge] = participants.collect {
case judge: Judge if judge.isScoring => judge
}
def suggestJudges(partialMatch: Option[PartialRoundMatch]): Seq[Participant] = {
partialMatch match {
case Some(_: MatchedTeams) =>
presidingJudges
case Some(_: MatchedTeamsWithPresidingJudge) =>
scoringJudges
case _ =>
Seq()
}
}
}
/**
* Suggests teams in a randomized fashion. Presiding and scoring judges are suggested in the order
* they appeared in the original participants sequence.
*/
case class RandomizedParticipantSuggester(
override val participants: Seq[Participant]
) extends ParticipantSuggester {
val randomizedTeams = Random.shuffle(teams)
def suggestParticipants(partialMatch: Option[PartialRoundMatch]): Seq[Participant] = {
partialMatch match {
case None | Some(MatchSeed(_)) =>
randomizedTeams
case _ =>
suggestJudges(partialMatch)
}
}
def withoutParticipant(participantId: UUID): ParticipantSuggester = {
new RandomizedParticipantSuggester(
participants.filterNot(_.id == participantId)
)
}
def withParticipants(participants: Seq[Participant]): ParticipantSuggester = {
new RandomizedParticipantSuggester(participants)
}
}
/**
* Suggests teams in the order of least absolute value difference in overall score. Presiding and
* scoring judges are suggested in the order they appeared in the original participants sequence.
*/
case class CompetitiveParticipantSuggester(
override val participants: Seq[Participant]
) extends ParticipantSuggester {
def suggestParticipants(partialMatch: Option[PartialRoundMatch]): Seq[Participant] = {
partialMatch match {
case None =>
teams.sortBy(_.averageScore).reverse
case Some(MatchSeed(initialTeam)) =>
teams.sortBy { team =>
abs(team.averageScore - initialTeam.averageScore)
}
case _ =>
suggestJudges(partialMatch)
}
}
def withoutParticipant(participantId: UUID): ParticipantSuggester = {
new CompetitiveParticipantSuggester(
participants.filterNot(_.id == participantId)
)
}
def withParticipants(participants: Seq[Participant]): ParticipantSuggester = {
new CompetitiveParticipantSuggester(participants)
}
}
/**
 * Suggests teams using an "opportunity power match" algorithm. When a new matching round starts,
 * teams are suggested in descending order of average score. If the seed team has no scores yet,
 * the algorithm first suggests the other teams that also lack scores. If the seed team does have
 * a score, the score-ordered list is interleaved into two tiers (alternating groups of three),
 * the tier containing the seed team is identified, and the members of the *other* tier are
 * suggested first.
*/
case class OpportunityParticipantSuggester(
override val participants: Seq[Participant]
) extends ParticipantSuggester {
def suggestParticipants(partialMatch: Option[PartialRoundMatch]): Seq[Participant] = {
partialMatch match {
case None =>
teams.sortBy(_.averageScore).reverse
case Some(MatchSeed(initialTeam)) if ! initialTeam.hasScores_? =>
teams.sortBy(_.hasScores_?)
case Some(MatchSeed(initialTeam)) =>
// highest teams first
val sortedByScore = teams.sortBy(_.averageScore).reverse
val listLength = sortedByScore.length
        val windowedGroupings = sortedByScore.sliding(3, 3)
        val (intermediate1, intermediate2) = windowedGroupings
.zipWithIndex
.partition(_._2 % 2 == 0) // group by even and odd indexes
val teamListA: Seq[CompetingTeam] = intermediate1.flatMap(_._1).toSeq
val teamListB: Seq[CompetingTeam] = intermediate2.flatMap(_._1).toSeq
val initialTeamIsListA = teamListB.find(_.id == initialTeam.id).isEmpty
if (initialTeamIsListA) {
teamListB ++
teamListA
} else {
teamListA ++
teamListB
}
case _ =>
suggestJudges(partialMatch)
}
}
def withoutParticipant(participantId: UUID): ParticipantSuggester = {
new OpportunityParticipantSuggester(
participants.filterNot(_.id == participantId)
)
}
def withParticipants(participants: Seq[Participant]): ParticipantSuggester = {
new OpportunityParticipantSuggester(participants)
}
}
/**
 * A suggester that wraps another suggester. When seeding a new matching round, the
 * ByePrioritizingParticipantSuggester groups the competing teams by bye count and suggests the
 * groups with the highest bye counts first, ordering the teams inside each group with the wrapped
 * suggester. If all teams share the same bye count, it delegates directly to the wrapped suggester.
*/
case class ByePrioritizingParticipantSuggester(
innerSuggester: ParticipantSuggester
) extends ParticipantSuggester {
override val participants = innerSuggester.participants
private[this] def possiblySuggestOnBye(partialMatch: Option[PartialRoundMatch]): Seq[Participant] = {
val byeBuckets = teams.groupBy(_.byeCount)
val byeCounts = byeBuckets.keySet.toSeq.sortBy(c => c).reverse
if (byeCounts.length > 1) {
logger.trace(s"Suggesting teams based on bye counts: ${byeBuckets.mapValues(_.map(_.name.value))}")
val internallyProcessedByeBuckets = byeBuckets.mapValues(filteredTeams =>
innerSuggester.withParticipants(filteredTeams ++ presidingJudges ++ scoringJudges).suggestParticipants(partialMatch)
)
byeCounts.flatMap({ byeCount =>
internallyProcessedByeBuckets.get(byeCount)
}).foldLeft(Seq.empty[Participant])(_ ++ _)
} else {
logger.trace("Byes are balanced. Delegating suggestions.")
innerSuggester.suggestParticipants(partialMatch)
}
}
def suggestParticipants(partialMatch: Option[PartialRoundMatch]): Seq[Participant] = {
logger.trace("Suggesting participants")
partialMatch match {
case value @ None =>
logger.trace("Suggesting prosecution")
possiblySuggestOnBye(value)
case value @ Some(MatchSeed(_)) =>
logger.trace("Suggesting defense")
possiblySuggestOnBye(value)
case other =>
logger.trace("Delegating suggestions")
innerSuggester.suggestParticipants(other)
}
}
def withoutParticipant(participantId: UUID): ParticipantSuggester = {
new ByePrioritizingParticipantSuggester(
innerSuggester.withoutParticipant(participantId)
)
}
def withParticipants(participants: Seq[Participant]): ParticipantSuggester = {
new ByePrioritizingParticipantSuggester(innerSuggester.withParticipants(participants))
}
}
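// A minimal composition sketch (the `participants` sequence is illustrative): the bye-aware
// suggester wraps any of the other suggesters and delegates the ordering within each bye bucket:
//
//   val suggester: ParticipantSuggester =
//     ByePrioritizingParticipantSuggester(CompetitiveParticipantSuggester(participants))
//   val candidates = suggester.suggestParticipants(None)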
|
farmdawgnation/scyig-judicial
|
src/main/scala/frmr/scyig/matching/ParticipantSuggester.scala
|
Scala
|
apache-2.0
| 8,003 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.jdbc
import java.sql.{Connection, DriverManager}
import java.util.{Locale, Properties}
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.sql.types.StructType
/**
* Options for the JDBC data source.
*/
class JDBCOptions(
@transient private val parameters: CaseInsensitiveMap[String])
extends Serializable {
import JDBCOptions._
def this(parameters: Map[String, String]) = this(CaseInsensitiveMap(parameters))
def this(url: String, table: String, parameters: Map[String, String]) = {
this(CaseInsensitiveMap(parameters ++ Map(
JDBCOptions.JDBC_URL -> url,
JDBCOptions.JDBC_TABLE_NAME -> table)))
}
/**
* Returns a property with all options.
*/
val asProperties: Properties = {
val properties = new Properties()
parameters.originalMap.foreach { case (k, v) => properties.setProperty(k, v) }
properties
}
/**
* Returns a property with all options except Spark internal data source options like `url`,
* `dbtable`, and `numPartition`. This should be used when invoking JDBC API like `Driver.connect`
* because each DBMS vendor has its own property list for JDBC driver. See SPARK-17776.
*/
val asConnectionProperties: Properties = {
val properties = new Properties()
parameters.originalMap.filterKeys(key => !jdbcOptionNames(key.toLowerCase(Locale.ROOT)))
.foreach { case (k, v) => properties.setProperty(k, v) }
properties
}
// ------------------------------------------------------------
// Required parameters
// ------------------------------------------------------------
require(parameters.isDefinedAt(JDBC_URL), s"Option '$JDBC_URL' is required.")
require(parameters.isDefinedAt(JDBC_TABLE_NAME), s"Option '$JDBC_TABLE_NAME' is required.")
// a JDBC URL
val url = parameters(JDBC_URL)
// name of table
val table = parameters(JDBC_TABLE_NAME)
// ------------------------------------------------------------
// Optional parameters
// ------------------------------------------------------------
val driverClass = {
val userSpecifiedDriverClass = parameters.get(JDBC_DRIVER_CLASS)
userSpecifiedDriverClass.foreach(DriverRegistry.register)
// Performing this part of the logic on the driver guards against the corner-case where the
// driver returned for a URL is different on the driver and executors due to classpath
// differences.
userSpecifiedDriverClass.getOrElse {
DriverManager.getDriver(url).getClass.getCanonicalName
}
}
// the number of partitions
val numPartitions = parameters.get(JDBC_NUM_PARTITIONS).map(_.toInt)
  // the number of seconds the driver will wait for a Statement object to execute.
  // Zero means there is no limit.
val queryTimeout = parameters.getOrElse(JDBC_QUERY_TIMEOUT, "0").toInt
// ------------------------------------------------------------
// Optional parameters only for reading
// ------------------------------------------------------------
// the column used to partition
val partitionColumn = parameters.get(JDBC_PARTITION_COLUMN)
// the lower bound of partition column
val lowerBound = parameters.get(JDBC_LOWER_BOUND).map(_.toLong)
// the upper bound of the partition column
val upperBound = parameters.get(JDBC_UPPER_BOUND).map(_.toLong)
// numPartitions is also used for data source writing
require((partitionColumn.isEmpty && lowerBound.isEmpty && upperBound.isEmpty) ||
(partitionColumn.isDefined && lowerBound.isDefined && upperBound.isDefined &&
numPartitions.isDefined),
s"When reading JDBC data sources, users need to specify all or none for the following " +
s"options: '$JDBC_PARTITION_COLUMN', '$JDBC_LOWER_BOUND', '$JDBC_UPPER_BOUND', " +
s"and '$JDBC_NUM_PARTITIONS'")
val fetchSize = {
val size = parameters.getOrElse(JDBC_BATCH_FETCH_SIZE, "0").toInt
require(size >= 0,
s"Invalid value `${size.toString}` for parameter " +
s"`$JDBC_BATCH_FETCH_SIZE`. The minimum value is 0. When the value is 0, " +
"the JDBC driver ignores the value and does the estimates.")
size
}
// ------------------------------------------------------------
// Optional parameters only for writing
// ------------------------------------------------------------
// if to truncate the table from the JDBC database
val isTruncate = parameters.getOrElse(JDBC_TRUNCATE, "false").toBoolean
  // the create table option, which can be table_options or partition_options.
// E.g., "CREATE TABLE t (name string) ENGINE=InnoDB DEFAULT CHARSET=utf8"
// TODO: to reuse the existing partition parameters for those partition specific options
val createTableOptions = parameters.getOrElse(JDBC_CREATE_TABLE_OPTIONS, "")
val createTableColumnTypes = parameters.get(JDBC_CREATE_TABLE_COLUMN_TYPES)
val customSchema = parameters.get(JDBC_CUSTOM_DATAFRAME_COLUMN_TYPES)
val batchSize = {
val size = parameters.getOrElse(JDBC_BATCH_INSERT_SIZE, "1000").toInt
require(size >= 1,
s"Invalid value `${size.toString}` for parameter " +
s"`$JDBC_BATCH_INSERT_SIZE`. The minimum value is 1.")
size
}
val isolationLevel =
parameters.getOrElse(JDBC_TXN_ISOLATION_LEVEL, "READ_UNCOMMITTED") match {
case "NONE" => Connection.TRANSACTION_NONE
case "READ_UNCOMMITTED" => Connection.TRANSACTION_READ_UNCOMMITTED
case "READ_COMMITTED" => Connection.TRANSACTION_READ_COMMITTED
case "REPEATABLE_READ" => Connection.TRANSACTION_REPEATABLE_READ
case "SERIALIZABLE" => Connection.TRANSACTION_SERIALIZABLE
}
// An option to execute custom SQL before fetching data from the remote DB
val sessionInitStatement = parameters.get(JDBC_SESSION_INIT_STATEMENT)
}
object JDBCOptions {
private val jdbcOptionNames = collection.mutable.Set[String]()
private def newOption(name: String): String = {
jdbcOptionNames += name.toLowerCase(Locale.ROOT)
name
}
val JDBC_URL = newOption("url")
val JDBC_TABLE_NAME = newOption("dbtable")
val JDBC_DRIVER_CLASS = newOption("driver")
val JDBC_PARTITION_COLUMN = newOption("partitionColumn")
val JDBC_LOWER_BOUND = newOption("lowerBound")
val JDBC_UPPER_BOUND = newOption("upperBound")
val JDBC_NUM_PARTITIONS = newOption("numPartitions")
val JDBC_QUERY_TIMEOUT = newOption("queryTimeout")
val JDBC_BATCH_FETCH_SIZE = newOption("fetchsize")
val JDBC_TRUNCATE = newOption("truncate")
val JDBC_CREATE_TABLE_OPTIONS = newOption("createTableOptions")
val JDBC_CREATE_TABLE_COLUMN_TYPES = newOption("createTableColumnTypes")
val JDBC_CUSTOM_DATAFRAME_COLUMN_TYPES = newOption("customSchema")
val JDBC_BATCH_INSERT_SIZE = newOption("batchsize")
val JDBC_TXN_ISOLATION_LEVEL = newOption("isolationLevel")
val JDBC_SESSION_INIT_STATEMENT = newOption("sessionInitStatement")
}
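// An illustrative construction (the URL, table, and option values are examples only):
//
//   val opts = new JDBCOptions(
//     "jdbc:postgresql://host:5432/db",   // url
//     "public.users",                     // dbtable
//     Map("fetchsize" -> "1000", "numPartitions" -> "4"))
//   opts.fetchSize   // 1000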
|
lxsmnv/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCOptions.scala
|
Scala
|
apache-2.0
| 7,716 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api
import org.apache.flink.table.api.scala._
import org.apache.flink.table.runtime.utils.CommonTestData
import org.apache.flink.table.utils.TableTestUtil._
import org.apache.flink.table.utils.TableTestBase
import org.junit.Test
/**
* Test for external catalog query plan.
*/
class ExternalCatalogTest extends TableTestBase {
private val table1Path: Array[String] = Array("test", "db1", "tb1")
private val table1TopLevelPath: Array[String] = Array("test", "tb1")
private val table1ProjectedFields: Array[String] = Array("a", "b", "c")
private val table2Path: Array[String] = Array("test", "db2", "tb2")
private val table2ProjectedFields: Array[String] = Array("d", "e", "g")
@Test
def testBatchTableApi(): Unit = {
val util = batchTestUtil()
val tableEnv = util.tableEnv
tableEnv.registerExternalCatalog(
"test",
CommonTestData.getInMemoryTestCatalog(isStreaming = false))
val table1 = tableEnv.scan("test", "db1", "tb1")
val table2 = tableEnv.scan("test", "db2", "tb2")
val result = table2
.select('d * 2, 'e, 'g.upperCase())
.unionAll(table1.select('a * 2, 'b, 'c.upperCase()))
val expected = binaryNode(
"DataSetUnion",
unaryNode(
"DataSetCalc",
sourceBatchTableNode(table2Path, table2ProjectedFields),
term("select", "*(d, 2) AS _c0", "e", "UPPER(g) AS _c2")
),
unaryNode(
"DataSetCalc",
sourceBatchTableNode(table1Path, table1ProjectedFields),
term("select", "*(a, 2) AS _c0", "b", "UPPER(c) AS _c2")
),
term("all", "true"),
term("union", "_c0", "e", "_c2")
)
util.verifyTable(result, expected)
}
@Test
def testBatchSQL(): Unit = {
val util = batchTestUtil()
util.tableEnv.registerExternalCatalog(
"test",
CommonTestData.getInMemoryTestCatalog(isStreaming = false))
val sqlQuery = "SELECT d * 2, e, g FROM test.db2.tb2 WHERE d < 3 UNION ALL " +
"(SELECT a * 2, b, c FROM test.db1.tb1)"
val expected = binaryNode(
"DataSetUnion",
unaryNode(
"DataSetCalc",
sourceBatchTableNode(table2Path, table2ProjectedFields),
term("select", "*(d, 2) AS EXPR$0", "e", "g"),
term("where", "<(d, 3)")),
unaryNode(
"DataSetCalc",
sourceBatchTableNode(table1Path, table1ProjectedFields),
term("select", "*(a, 2) AS EXPR$0", "b", "c")
),
term("all", "true"),
term("union", "EXPR$0", "e", "g"))
util.verifySql(sqlQuery, expected)
}
@Test
def testStreamTableApi(): Unit = {
val util = streamTestUtil()
val tableEnv = util.tableEnv
util.tableEnv.registerExternalCatalog(
"test",
CommonTestData.getInMemoryTestCatalog(isStreaming = true))
val table1 = tableEnv.scan("test", "db1", "tb1")
val table2 = tableEnv.scan("test", "db2", "tb2")
val result = table2.where("d < 3")
.select('d * 2, 'e, 'g.upperCase())
.unionAll(table1.select('a * 2, 'b, 'c.upperCase()))
val expected = binaryNode(
"DataStreamUnion",
unaryNode(
"DataStreamCalc",
sourceStreamTableNode(table2Path, table2ProjectedFields),
term("select", "*(d, 2) AS _c0", "e", "UPPER(g) AS _c2"),
term("where", "<(d, 3)")
),
unaryNode(
"DataStreamCalc",
sourceStreamTableNode(table1Path, table1ProjectedFields),
term("select", "*(a, 2) AS _c0", "b", "UPPER(c) AS _c2")
),
term("all", "true"),
term("union all", "_c0", "e", "_c2")
)
util.verifyTable(result, expected)
}
@Test
def testStreamSQL(): Unit = {
val util = streamTestUtil()
util.tableEnv.registerExternalCatalog(
"test",
CommonTestData.getInMemoryTestCatalog(isStreaming = true))
val sqlQuery = "SELECT d * 2, e, g FROM test.db2.tb2 WHERE d < 3 UNION ALL " +
"(SELECT a * 2, b, c FROM test.db1.tb1)"
val expected = binaryNode(
"DataStreamUnion",
unaryNode(
"DataStreamCalc",
sourceStreamTableNode(table2Path, table2ProjectedFields),
term("select", "*(d, 2) AS EXPR$0", "e", "g"),
term("where", "<(d, 3)")),
unaryNode(
"DataStreamCalc",
sourceStreamTableNode(table1Path, table1ProjectedFields),
term("select", "*(a, 2) AS EXPR$0", "b", "c")
),
term("all", "true"),
term("union all", "EXPR$0", "e", "g"))
util.verifySql(sqlQuery, expected)
}
@Test
def testTopLevelTable(): Unit = {
val util = batchTestUtil()
val tableEnv = util.tableEnv
tableEnv.registerExternalCatalog(
"test",
CommonTestData.getInMemoryTestCatalog(isStreaming = false))
val table1 = tableEnv.scan("test", "tb1")
val table2 = tableEnv.scan("test", "db2", "tb2")
val result = table2
.select('d * 2, 'e, 'g.upperCase())
.unionAll(table1.select('a * 2, 'b, 'c.upperCase()))
val expected = binaryNode(
"DataSetUnion",
unaryNode(
"DataSetCalc",
sourceBatchTableNode(table2Path, table2ProjectedFields),
term("select", "*(d, 2) AS _c0", "e", "UPPER(g) AS _c2")
),
unaryNode(
"DataSetCalc",
sourceBatchTableNode(table1TopLevelPath, table1ProjectedFields),
term("select", "*(a, 2) AS _c0", "b", "UPPER(c) AS _c2")
),
term("all", "true"),
term("union", "_c0", "e", "_c2")
)
util.verifyTable(result, expected)
}
def sourceBatchTableNode(
sourceTablePath: Array[String],
fields: Array[String]): String = {
s"BatchTableSourceScan(table=[[${sourceTablePath.mkString(", ")}]], " +
s"fields=[${fields.mkString(", ")}], " +
s"source=[CsvTableSource(read fields: ${fields.mkString(", ")})])"
}
def sourceStreamTableNode(sourceTablePath: Array[String], fields: Array[String]): String = {
s"StreamTableSourceScan(table=[[${sourceTablePath.mkString(", ")}]], " +
s"fields=[${fields.mkString(", ")}], " +
s"source=[CsvTableSource(read fields: ${fields.mkString(", ")})])"
}
}
|
ueshin/apache-flink
|
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/ExternalCatalogTest.scala
|
Scala
|
apache-2.0
| 6,914 |
package models.oauth2
import java.util.Date
import play.api.db.slick.Config.driver.simple._
import scala.slick.lifted.Tag
import play.api.db.slick.DB
import play.api.Play.current
/**
* Created by Alper on 29.01.2015.
*/
case class AuthCode(authorizationCode: String, userId: Long, redirectUri: Option[String], createdAt: java.sql.Timestamp, scope: Option[String], clientId: String, expiresIn: Int)
class AuthCodes(tag: Tag) extends Table[AuthCode](tag, Some("configuration") ,"auth_code") {
def authorizationCode = column[String]("authorization_code", O.PrimaryKey)
def userId = column[Long]("user_id")
def redirectUri = column[Option[String]]("redirect_uri")
def createdAt = column[java.sql.Timestamp]("created_at")
def scope = column[Option[String]]("scope")
def clientId = column[String]("client_id")
def expiresIn = column[Int]("expires_in")
def * = (authorizationCode, userId, redirectUri, createdAt, scope, clientId, expiresIn) <> ((AuthCode.apply _).tupled, AuthCode.unapply _)
}
object AuthCodes extends DAO {
def find(code: String) = {
DB.withTransaction { implicit session =>
val authCode = AuthCodes.filter(a => a.authorizationCode === code).firstOption
// filtering out expired authorization codes
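      // expiresIn is stored in seconds, so it is converted to milliseconds before the comparison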
authCode.filter(p => p.createdAt.getTime + (p.expiresIn * 1000) > new Date().getTime)
}
}
}
|
ZKTecoEu/ZKRestApi
|
ZKRestServer/app/models/oauth2/AuthCode.scala
|
Scala
|
mit
| 1,353 |
package skinny.micro.request
import java.io.BufferedReader
import java.security.Principal
import java.util.Locale
import javax.servlet._
import javax.servlet.http._
import skinny.micro.{ UnstableAccessException, UnstableAccessValidation }
import scala.collection.JavaConverters._
import scala.collection.concurrent.TrieMap
import scala.util.Try
/**
* Stable HttpServletRequest
*
* HttpServletRequest object can be recycled.
*
* see also: https://github.com/scalatra/scalatra/pull/514
* see also: http://jetty.4.x6.nabble.com/jetty-users-getContextPath-returns-null-td4962387.html
* see also: https://bugs.eclipse.org/bugs/show_bug.cgi?id=433321
*/
class StableHttpServletRequest(
private val underlying: HttpServletRequest,
private val unstableAccessValidation: UnstableAccessValidation)
extends HttpServletRequestWrapper(underlying) {
private[this] def ensureStableAccessStrictly(attributeName: String): Unit = {
if (unstableAccessValidation.enabled) {
val threadId = Thread.currentThread.getId
if (unstableAccessValidation.createdThreadId != threadId) {
throw new UnstableAccessException(attributeName)
}
}
}
// -------------------------
// context, fixed request metadata
override def getServletContext: ServletContext = {
ensureStableAccessStrictly("getServletContext")
underlying.getServletContext
}
// AsyncContext must not be cached
override def getAsyncContext: AsyncContext = {
ensureStableAccessStrictly("getAsyncContext")
underlying.getAsyncContext
}
override def startAsync(): AsyncContext = {
ensureStableAccessStrictly("startAsync")
underlying.startAsync()
}
override def startAsync(servletRequest: ServletRequest, servletResponse: ServletResponse): AsyncContext = {
ensureStableAccessStrictly("startAsync")
underlying.startAsync(servletRequest, servletResponse)
}
private[this] var _getRequest = super.getRequest
override def getRequest: ServletRequest = _getRequest
override def setRequest(request: ServletRequest): Unit = {
_getRequest.synchronized {
super.setRequest(request)
_getRequest = request
}
}
override def isWrapperFor(wrapped: ServletRequest): Boolean = super.isWrapperFor(wrapped)
override def isWrapperFor(wrappedType: Class[_]): Boolean = super.isWrapperFor(wrappedType)
private[this] val _getDispatcherType = underlying.getDispatcherType
override def getDispatcherType: DispatcherType = _getDispatcherType
override def getReader: BufferedReader = underlying.getReader
override def getInputStream: ServletInputStream = underlying.getInputStream
private[this] val _getAuthType = underlying.getAuthType
override def getAuthType: String = _getAuthType
private[this] val _getMethod = underlying.getMethod
override def getMethod: String = _getMethod
private[this] val _getPathInfo = underlying.getPathInfo
override def getPathInfo: String = _getPathInfo
private[this] val _getPathTranslated = underlying.getPathTranslated
override def getPathTranslated: String = _getPathTranslated
private[this] val _getContextPath = underlying.getContextPath
override def getContextPath: String = _getContextPath
private[this] val _getQueryString = underlying.getQueryString
override def getQueryString: String = _getQueryString
private[this] val _getRemoteUser = underlying.getRemoteUser
override def getRemoteUser: String = _getRemoteUser
private[this] val _getRequestedSessionId = underlying.getRequestedSessionId
override def getRequestedSessionId: String = _getRequestedSessionId
private[this] val _getRequestURI = underlying.getRequestURI
override def getRequestURI: String = _getRequestURI
private[this] val _getServletPath = underlying.getServletPath
override def getServletPath: String = _getServletPath
private[this] val _isRequestedSessionIdValid = underlying.isRequestedSessionIdValid
override def isRequestedSessionIdValid: Boolean = _isRequestedSessionIdValid
private[this] val _isRequestedSessionIdFromCookie = underlying.isRequestedSessionIdFromCookie
override def isRequestedSessionIdFromCookie: Boolean = _isRequestedSessionIdFromCookie
private[this] val _isRequestedSessionIdFromURL = underlying.isRequestedSessionIdFromURL
override def isRequestedSessionIdFromURL: Boolean = _isRequestedSessionIdFromURL
override def isRequestedSessionIdFromUrl: Boolean = isRequestedSessionIdFromURL
private[this] var _getCharacterEncoding = underlying.getCharacterEncoding
override def getCharacterEncoding: String = _getCharacterEncoding
override def setCharacterEncoding(enc: String): Unit = {
_getCharacterEncoding.synchronized {
underlying.setCharacterEncoding(enc)
_getCharacterEncoding = enc
}
}
private[this] val _getContentLength = underlying.getContentLength
override def getContentLength: Int = _getContentLength
private[this] val _getContentType = underlying.getContentType
override def getContentType: String = _getContentType
private[this] val _getProtocol = underlying.getProtocol
override def getProtocol: String = _getProtocol
// java.lang.IllegalStateException: No uri on Jetty when testing
private[this] val _getServerName = Try(underlying.getServerName).getOrElse(null)
override def getServerName: String = _getServerName
private[this] val _getScheme = underlying.getScheme
override def getScheme: String = _getScheme
// java.lang.IllegalStateException: No uri on Jetty when testing
private[this] val _getServerPort = Try(underlying.getServerPort).getOrElse(-1)
override def getServerPort: Int = _getServerPort
private[this] val _getRemoteAddr = underlying.getRemoteAddr
override def getRemoteAddr: String = _getRemoteAddr
private[this] val _getRemoteHost = underlying.getRemoteHost
override def getRemoteHost: String = _getRemoteHost
private[this] val _isSecure = underlying.isSecure
override def isSecure: Boolean = _isSecure
private[this] val _getRemotePort = underlying.getRemotePort
override def getRemotePort: Int = _getRemotePort
private[this] val _getLocalName = underlying.getLocalName
override def getLocalName: String = _getLocalName
private[this] val _getLocalAddr = underlying.getLocalAddr
override def getLocalAddr: String = _getLocalAddr
private[this] val _getLocalPort = underlying.getLocalPort
override def getLocalPort: Int = _getLocalPort
private[this] val _isAsyncStarted = underlying.isAsyncStarted
override def isAsyncStarted: Boolean = _isAsyncStarted
private[this] val _isAsyncSupported = underlying.isAsyncSupported
override def isAsyncSupported: Boolean = _isAsyncSupported
// java.lang.IllegalStateException: No uri on Jetty when testing
private[this] val _getRequestURL = Try(underlying.getRequestURL).getOrElse(new StringBuffer)
override def getRequestURL: StringBuffer = _getRequestURL
private[this] val _getCookies = underlying.getCookies
override def getCookies: Array[Cookie] = _getCookies
private[this] val _getUserPrincipal = underlying.getUserPrincipal
override def getUserPrincipal: Principal = _getUserPrincipal
// should not cache the value: java.lang.IllegalStateException: No SessionManager
override def getSession: HttpSession = {
ensureStableAccessStrictly("getSession")
underlying.getSession
}
override def getSession(create: Boolean): HttpSession = {
ensureStableAccessStrictly("getSession")
underlying.getSession(create)
}
override def authenticate(response: HttpServletResponse): Boolean = {
ensureStableAccessStrictly("authenticate")
underlying.authenticate(response)
}
override def logout(): Unit = {
ensureStableAccessStrictly("logout")
underlying.logout()
}
override def isUserInRole(role: String): Boolean = {
ensureStableAccessStrictly("isUserInRole")
underlying.isUserInRole(role)
}
override def login(username: String, password: String): Unit = {
ensureStableAccessStrictly("login")
underlying.login(username, password)
}
// Don't override getParts
// javax.servlet.ServletException: Content-Type != multipart/form-data
override def getParts: java.util.Collection[Part] = {
ensureStableAccessStrictly("getParts")
underlying.getParts
}
override def getPart(name: String): Part = {
ensureStableAccessStrictly("getPart")
underlying.getPart(name)
}
// deprecated
// override def getRealPath(path: String): String = underlying.getRealPath(path)
// -------------------------
// parameters
private[this] val _getParameterNames = underlying.getParameterNames
private[this] val _getParameterMap = underlying.getParameterMap
override def getParameterNames: java.util.Enumeration[String] = _getParameterNames
override def getParameterMap: java.util.Map[String, Array[String]] = _getParameterMap
override def getParameter(name: String): String = getParameterMap.get(name).headOption.orNull[String]
override def getParameterValues(name: String): Array[String] = getParameterMap.get(name)
override def getRequestDispatcher(path: String): RequestDispatcher = underlying.getRequestDispatcher(path)
// -------------------------
// locale
private[this] val _getLocale = underlying.getLocale
private[this] val _getLocales = underlying.getLocales
override def getLocale: Locale = _getLocale
override def getLocales: java.util.Enumeration[Locale] = _getLocales
// -------------------------
// attributes
private[this] def _getAttributeNames: java.util.Enumeration[String] = _attributes.keys.iterator.asJavaEnumeration
private[this] val _attributes: TrieMap[String, AnyRef] = {
val result = new TrieMap[String, AnyRef]
Option(underlying.getAttributeNames).map(_.asScala).foreach { names =>
names.foreach { name =>
val value = underlying.getAttribute(name)
if (value != null) {
result.put(name, value)
}
}
}
result
}
override def getAttributeNames: java.util.Enumeration[String] = _getAttributeNames
override def getAttribute(name: String): AnyRef = _attributes.getOrElse(name, null)
override def setAttribute(name: String, o: scala.Any): Unit = {
underlying.setAttribute(name, o)
Option(o).foreach(v => _attributes.put(name, v.asInstanceOf[AnyRef]))
}
override def removeAttribute(name: String): Unit = {
underlying.removeAttribute(name)
_attributes.remove(name)
}
// -------------------------
// headers
// request headers are immutable
private[this] val _getHeaderNames = underlying.getHeaderNames
private[this] val _cachedGetHeader: Map[String, String] = {
Option(underlying.getHeaderNames)
.map(_.asScala.map(name => name -> underlying.getHeader(name)).filterNot { case (_, v) => v == null }.toMap)
.getOrElse(Map.empty)
}
private[this] val _cachedGetHeaders: Map[String, java.util.Enumeration[String]] = {
Option(underlying.getHeaderNames)
.map(_.asScala.map(name => name -> underlying.getHeaders(name)).filterNot { case (_, v) => v == null }.toMap)
.getOrElse(Map.empty)
}
override def getHeaderNames: java.util.Enumeration[String] = _getHeaderNames
override def getHeader(name: String): String = _cachedGetHeader.get(name).orNull[String]
override def getIntHeader(name: String): Int = {
// an integer expressing the value of the request header or -1 if the request doesn't have a header of this name
_cachedGetHeader.get(name).map(_.toInt).getOrElse(-1)
}
override def getHeaders(name: String): java.util.Enumeration[String] = {
// If the request does not have any headers of that name return an empty enumeration
_cachedGetHeaders.get(name).getOrElse(java.util.Collections.emptyEnumeration[String]())
}
override def getDateHeader(name: String): Long = {
// -1 if the named header was not included with the request
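    // note: the cached value is parsed with toLong, so this expects an epoch-millis string
    // rather than an RFC 1123 date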
_cachedGetHeader.get(name).map(_.toLong).getOrElse(-1L)
}
}
object StableHttpServletRequest {
def apply(req: HttpServletRequest, unstableAccessValidation: UnstableAccessValidation): StableHttpServletRequest = {
if (req == null) {
throw new IllegalStateException("Use AsyncResult { ... } or FutureWithContext { implicit ctx => ... } instead.")
} else if (req.isInstanceOf[StableHttpServletRequest]) {
req.asInstanceOf[StableHttpServletRequest]
} else {
new StableHttpServletRequest(req, unstableAccessValidation)
}
}
}
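// A minimal usage sketch (the `validation` value is assumed to be an UnstableAccessValidation
// created on the request-handling thread; `rawRequest` comes from the servlet container):
//
//   val stable = StableHttpServletRequest(rawRequest, validation)
//   stable.getHeader("Accept")   // served from the cached, immutable header snapshot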
|
xerial/skinny-micro
|
micro/src/main/scala/skinny/micro/request/StableHttpServletRequest.scala
|
Scala
|
bsd-2-clause
| 12,444 |
/*
* @author Philip Stutz
*
* Copyright 2014 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.triplerush
import org.scalatest.FlatSpec
import org.scalatest.prop.Checkers
import com.signalcollect.triplerush.EfficientIndexPattern.longToIndexPattern
import com.signalcollect.util.TestAnnouncements
class EfficientIndexPatternSpec extends FlatSpec with Checkers with TestAnnouncements {
"EfficientIndexPattern" should "correctly encode and decode index triple patterns" in {
check((s: Int, p: Int, o: Int) => {
val tp = TriplePattern(s, p, o)
if ((s == 0 || p == 0 || o == 0) && s >= 0 && p >= 0 && o >= 0) {
val efficient = tp.toEfficientIndexPattern
val decoded = efficient.toTriplePattern
val success = decoded === tp
if (!success) {
println(s"Problematic $tp was decoded as: $decoded.")
println(s"First embedded is: ${efficient.extractFirst}")
println(s"First mapped to positive is: ${efficient.extractFirst & Int.MaxValue}")
println(s"Second embedded is: ${efficient.extractSecond}")
println(s"Second mapped to positive is: ${efficient.extractSecond & Int.MaxValue}")
}
success
} else {
true
}
}, minSuccessful(10))
}
}
|
hicolour/triplerush
|
src/test/scala/com/signalcollect/triplerush/EfficientIndexPatternSpec.scala
|
Scala
|
apache-2.0
| 1,848 |
package org.oc.ld32.entity
import org.lengine.render.{Sprite, Texture}
class EntityBaguettePiece(val value: Float) extends BaguetteEntity {
private val sprite: Sprite = new Sprite(new Texture("assets/textures/entities/baguette.png"))
sprite.width *= 2f
sprite.height *= 2f
sprite.getCenter *= 2f
override def render(delta: Float): Unit = {
sprite.setPos(getPos.x, getPos.y)
sprite.render(delta)
}
override def update(delta: Float): Unit = {
boundingBox.x = getPos.x
boundingBox.y = getPos.y
}
override def init: Unit = {
super.init
boundingBox.width = 64f
boundingBox.height = 64f
}
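  // Touching the player adds this piece's value to their baguette completion and despawns the piece.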
override def onCollide(other: BaguetteEntity): Unit = {
other match {
case player: EntityPlayer =>
player.baguetteCompletion += value
level.despawn(this)
case _ =>
}
}
}
|
OurCraft/LD32
|
src/main/scala/org/oc/ld32/entity/EntityBaguettePiece.scala
|
Scala
|
apache-2.0
| 849 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.driver.cube
import com.stratio.sparta.sdk.{DimensionValuesTime, MeasuresValues}
import org.apache.spark.sql.Row
import org.apache.spark.streaming.dstream.DStream
/**
* It builds a pre-calculated DataCube with dimension/s, cube/s and operation/s defined by the user in the policy.
* Steps:
 * From an event stream it builds a Seq[(Seq[DimensionValue],Map[String, JSerializable])] with all needed data.
 * For each cube it calculates aggregations using the stream built in the previous step.
 * Finally, it returns a modified stream with the pre-calculated data encapsulated in an UpdateMetricOperation.
 * This final stream is used mainly by the outputs.
 * @param cubes the cube definitions that describe how the data will be aggregated.
*/
case class CubeMaker(cubes: Seq[Cube]) {
/**
* It builds the DataCube calculating aggregations.
* @param inputStream with the original stream of data.
* @return the built Cube.
*/
def setUp(inputStream: DStream[Row]): Seq[(String, DStream[(DimensionValuesTime, MeasuresValues)])] = {
cubes.map(cube => {
val currentCube = new CubeOperations(cube)
val extractedDimensionsStream = currentCube.extractDimensionsAggregations(inputStream)
(cube.name, cube.aggregate(extractedDimensionsStream))
})
}
}
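// An illustrative wiring (the cube definitions and the parsed row stream come from the policy;
// the names are examples only):
//
//   val cubeStreams: Seq[(String, DStream[(DimensionValuesTime, MeasuresValues)])] =
//     CubeMaker(cubes).setUp(parsedRowStream)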
|
danielcsant/sparta
|
driver/src/main/scala/com/stratio/sparta/driver/cube/CubeMaker.scala
|
Scala
|
apache-2.0
| 1,920 |
/*
* Copyright 2020 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.registration.attachments
import featureswitch.core.config.FeatureSwitching
import fixtures.ITRegistrationFixtures
import itutil.ControllerISpec
import models.{ApplicantDetails, TransactorDetails}
import models.api._
import play.api.libs.json.{JsString, Json}
import play.api.libs.ws.WSResponse
import scala.concurrent.Future
class PostalCoverSheetControllerISpec extends ControllerISpec with ITRegistrationFixtures with FeatureSwitching {
val url: String = controllers.registration.attachments.routes.PostalCoverSheetController.show.url
val testAckRef = "VRN1234567"
s"GET $url" must {
"return an OK" in new Setup {
given()
.user.isAuthorised()
.vatScheme.has("acknowledgement-reference", JsString(s"$testAckRef"))
.vatScheme.has("attachments", Json.toJson(Attachments(Some(Post), List[AttachmentType](IdentityEvidence, VAT2))))
.registrationApi.getSection[EligibilitySubmissionData](Some(testEligibilitySubmissionData))
insertCurrentProfileIntoDb(currentProfile, sessionId)
val response: Future[WSResponse] = buildClient(url).get()
whenReady(response) { res =>
res.status mustBe 200
}
}
"return an OK for a transactor" in new Setup {
given()
.user.isAuthorised()
.vatScheme.has("acknowledgement-reference", JsString(s"$testAckRef"))
.vatScheme.has("attachments", Json.toJson(Attachments(Some(Post), List[AttachmentType](IdentityEvidence, VAT2))))
.registrationApi.getSection[TransactorDetails](Some(validTransactorDetails))
.registrationApi.getSection[EligibilitySubmissionData](Some(testEligibilitySubmissionData.copy(isTransactor = true)))
.vatScheme.has("applicant-details", Json.toJson(validFullApplicantDetails)(ApplicantDetails.writes))
insertCurrentProfileIntoDb(currentProfile, sessionId)
val response: Future[WSResponse] = buildClient(url).get()
whenReady(response) { res =>
res.status mustBe 200
}
}
}
}
|
hmrc/vat-registration-frontend
|
it/controllers/registration/attachments/PostalCoverSheetControllerISpec.scala
|
Scala
|
apache-2.0
| 2,631 |
package views.vrm_retention
import models.CacheKeyPrefix
object Retain {
final val RetainCacheKey = s"${CacheKeyPrefix}retain"
}
|
dvla/vrm-retention-online
|
app/views/vrm_retention/Retain.scala
|
Scala
|
mit
| 134 |
package models
import java.util.UUID
import com.mohiva.play.silhouette.api.{Identity, LoginInfo}
/**
* The user object.
*
* @param userID The unique ID of the user.
* @param loginInfo The linked login info.
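 * @param role The role of the user.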
* @param firstName Maybe the first name of the authenticated user.
* @param lastName Maybe the last name of the authenticated user.
* @param fullName Maybe the full name of the authenticated user.
* @param email Maybe the email of the authenticated provider.
* @param avatarURL Maybe the avatar URL of the authenticated provider.
*/
case class User(
userID: UUID,
loginInfo: LoginInfo,
role : UserRole,
firstName: Option[String],
lastName: Option[String],
fullName: Option[String],
email: Option[String],
avatarURL: Option[String]) extends Identity
|
OpenCompare/OpenCompare
|
org.opencompare/play-app/app/models/User.scala
|
Scala
|
apache-2.0
| 785 |
package chana.script
import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.ExtendedActorSystem
import akka.actor.Extension
import akka.actor.ExtensionId
import akka.actor.ExtensionIdProvider
import akka.actor.Props
import akka.contrib.datareplication.DataReplication
import akka.contrib.datareplication.LWWMap
import akka.pattern.ask
import akka.cluster.Cluster
import java.util.concurrent.locks.ReentrantReadWriteLock
import javax.script.Compilable
import javax.script.CompiledScript
import javax.script.ScriptEngineManager
import scala.collection.mutable
import scala.concurrent.duration._
import scala.util.Failure
import scala.util.Success
/**
* Extension that starts a [[DistributedScriptBoard]] actor
* with settings defined in config section `chana.script-board`.
*/
object DistributedScriptBoard extends ExtensionId[DistributedScriptBoardExtension] with ExtensionIdProvider {
// -- implementation of akka extention
override def get(system: ActorSystem) = super.get(system)
override def lookup = DistributedScriptBoard
override def createExtension(system: ExtendedActorSystem) = new DistributedScriptBoardExtension(system)
// -- end of implementation of akka extention
/**
* Scala API: Factory method for `DistributedScriptBoard` [[akka.actor.Props]].
*/
def props(): Props = Props(classOf[DistributedScriptBoard])
/**
   * There is an sbt issue related to the classloader; passing a 'null' classloader to
   * new ScriptEngineManager(null) works around it:
* https://github.com/playframework/playframework/issues/2532
*/
lazy val engineManager = new ScriptEngineManager(null)
lazy val engine = engineManager.getEngineByName("nashorn").asInstanceOf[Compilable]
private val keyToScript = new mutable.HashMap[String, CompiledScript]()
private val entityFieldToScripts = new mutable.HashMap[String, Map[String, CompiledScript]].withDefaultValue(Map.empty)
private val scriptsLock = new ReentrantReadWriteLock()
private def keyOf(entity: String, field: String, id: String) = entity + "/" + field + "/" + id
private def putScript(key: String, compiledScript: CompiledScript): Unit = key.split('/') match {
case Array(entity, field, id) => putScript(entity, field, id, compiledScript)
case _ =>
}
private def putScript(entity: String, field: String, id: String, compiledScript: CompiledScript): Unit = {
val entityField = entity + "/" + field
val key = entityField + "/" + id
try {
scriptsLock.writeLock.lock
keyToScript(key) = compiledScript
entityFieldToScripts(entityField) = entityFieldToScripts(entityField) + (id -> compiledScript)
} finally {
scriptsLock.writeLock.unlock
}
}
private def removeScript(key: String): Unit = key.split('/') match {
case Array(entity, field, id) => removeScript(entity, field, id)
case _ =>
}
private def removeScript(entity: String, field: String, id: String): Unit = {
val entityField = entity + "/" + field
val key = entityField + "/" + id
try {
scriptsLock.writeLock.lock
keyToScript -= key
entityFieldToScripts(entityField) = entityFieldToScripts(entityField) - id
} finally {
scriptsLock.writeLock.unlock
}
}
def scriptsOf(entity: String, field: String): Map[String, CompiledScript] =
try {
scriptsLock.readLock.lock
entityFieldToScripts(entity + "/" + field)
} finally {
scriptsLock.readLock.unlock
}
val DataKey = "chana-scripts"
}
class DistributedScriptBoard extends Actor with ActorLogging {
import akka.contrib.datareplication.Replicator._
implicit val cluster = Cluster(context.system)
import context.dispatcher
val replicator = DataReplication(context.system).replicator
replicator ! Subscribe(DistributedScriptBoard.DataKey, self)
def receive = {
case chana.PutScript(entity, field, id, script) =>
val commander = sender()
compileScript(script) match {
case Success(compiledScript) =>
val key = DistributedScriptBoard.keyOf(entity, field, id)
replicator.ask(Update(DistributedScriptBoard.DataKey, LWWMap(), WriteAll(60.seconds))(_ + (key -> script)))(60.seconds).onComplete {
case Success(_: UpdateSuccess) =>
DistributedScriptBoard.putScript(key, compiledScript)
log.info("put script (Update) [{}]:\\n{} ", key, script)
commander ! Success(key)
case Success(_: UpdateTimeout) => commander ! Failure(chana.UpdateTimeoutException)
case Success(x: InvalidUsage) => commander ! Failure(x)
case Success(x: ModifyFailure) => commander ! Failure(x)
case failure => commander ! failure
}
case Failure(ex) =>
log.error(ex, ex.getMessage)
}
case chana.RemoveScript(entity, field, id) =>
val commander = sender()
val key = DistributedScriptBoard.keyOf(entity, field, id)
replicator.ask(Update(DistributedScriptBoard.DataKey, LWWMap(), WriteAll(60.seconds))(_ - key))(60.seconds).onComplete {
case Success(_: UpdateSuccess) =>
log.info("remove script (Update): {}", key)
DistributedScriptBoard.removeScript(key)
commander ! Success(key)
case Success(_: UpdateTimeout) => commander ! Failure(chana.UpdateTimeoutException)
case Success(x: InvalidUsage) => commander ! Failure(x)
case Success(x: ModifyFailure) => commander ! Failure(x)
case failure => commander ! failure
}
case Changed(DistributedScriptBoard.DataKey, LWWMap(entries: Map[String, String] @unchecked)) =>
// check if there were newly added
entries.foreach {
case (key, script) =>
DistributedScriptBoard.keyToScript.get(key) match {
case None =>
compileScript(script) match {
case Success(compiledScript) =>
DistributedScriptBoard.putScript(key, compiledScript)
log.info("put script (Changed) [{}]:\\n{} ", key, script)
case Failure(ex) =>
log.error(ex, ex.getMessage)
}
case Some(script) => // TODO, existed, but changed?
}
}
// check if there were removed
val toRemove = DistributedScriptBoard.keyToScript.filter(x => !entries.contains(x._1)).keys
if (toRemove.nonEmpty) {
log.info("remove script (Changed): {}", toRemove)
toRemove foreach DistributedScriptBoard.removeScript
}
}
private def compileScript(script: String) =
try {
val compiledScript = DistributedScriptBoard.engine.compile(script)
Success(compiledScript)
} catch {
case ex: Throwable => Failure(ex)
}
}
class DistributedScriptBoardExtension(system: ExtendedActorSystem) extends Extension {
private val config = system.settings.config.getConfig("chana.script-board")
private val role: Option[String] = config.getString("role") match {
case "" => None
case r => Some(r)
}
  /**
   * Returns true if the cluster node is terminated or if this member is not tagged with
   * the role configured for the script board.
   */
def isTerminated: Boolean = Cluster(system).isTerminated || !role.forall(Cluster(system).selfRoles.contains)
/**
* The [[DistributedScriptBoard]]
*/
val board: ActorRef = {
if (isTerminated)
system.deadLetters
else {
val name = config.getString("name")
system.actorOf(
DistributedScriptBoard.props(),
name)
}
}
}
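// Hedged configuration sketch (not part of the original chana sources): the extension above
// reads its settings from the `chana.script-board` section of the ActorSystem configuration,
// e.g. in application.conf (the name and role values shown are illustrative assumptions):
//
//   chana.script-board {
//     name = "distributedScriptBoard"
//     role = ""   # empty string starts the board on every node; set a role to restrict it
//   }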
|
matthewtt/chana
|
src/main/scala/chana/script/DistributedScriptBoard.scala
|
Scala
|
apache-2.0
| 7,679 |
package org.scaladebugger.api.lowlevel.watchpoints
import java.util.concurrent.atomic.AtomicBoolean
import com.sun.jdi.event.AccessWatchpointEvent
import org.scaladebugger.api.lowlevel.events.EventType._
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}
class StandardAccessWatchpointManagerIntegrationSpec extends ParallelMockFunSpec
with VirtualMachineFixtures
with ApiTestUtilities
{
describe("StandardAccessWatchpointManager") {
it("should be able to detect access to a field") {
val testClass = "org.scaladebugger.test.watchpoints.AccessWatchpoint"
val testFile = JDITools.scalaClassStringToFileString(testClass)
val className = "org.scaladebugger.test.watchpoints.SomeAccessClass"
val fieldName = "field"
val detectedAccessWatchpoint = new AtomicBoolean(false)
val s = DummyScalaVirtualMachine.newInstance()
import s.lowlevel._
accessWatchpointManager.createAccessWatchpointRequest(
className,
fieldName
)
      // Listen for access watchpoint events for the specific field
eventManager.addResumingEventHandler(AccessWatchpointEventType, e => {
val accessWatchpointEvent = e.asInstanceOf[AccessWatchpointEvent]
val name = accessWatchpointEvent.field().name()
// If we detected access for our variable, mark our flag
if (name == fieldName) detectedAccessWatchpoint.set(true)
})
withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
logTimeTaken(eventually {
assert(detectedAccessWatchpoint.get(), s"$fieldName never accessed!")
})
}
}
}
}
|
ensime/scala-debugger
|
scala-debugger-api/src/it/scala/org/scaladebugger/api/lowlevel/watchpoints/StandardAccessWatchpointManagerIntegrationSpec.scala
|
Scala
|
apache-2.0
| 1,821 |
package edu.gemini.pit.catalog.votable
object CooSys {
def apply(e: Elem): CooSys = {
val id = e.attr("ID")
val equinox = e.attr("equinox")
val epoch = e.attr("epoch")
val system = e.attr("system")
CooSys(id, equinox, epoch, system)
}
}
case class CooSys(id:String, equinox:String, epoch:String, system:String)
|
arturog8m/ocs
|
bundle/edu.gemini.pit/src/main/scala/edu/gemini/pit/catalog/votable/CooSys.scala
|
Scala
|
bsd-3-clause
| 342 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import org.apache.hadoop.hive.serde2.`lazy`.LazySimpleSerDe
import org.scalatest.exceptions.TestFailedException
import org.apache.spark.{SparkException, TaskContext, TestUtils}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.catalyst.plans.physical.Partitioning
import org.apache.spark.sql.execution.{SparkPlan, SparkPlanTest, UnaryExecNode}
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.types.StringType
class ScriptTransformationSuite extends SparkPlanTest with TestHiveSingleton {
import spark.implicits._
private val noSerdeIOSchema = HiveScriptIOSchema(
inputRowFormat = Seq.empty,
outputRowFormat = Seq.empty,
inputSerdeClass = None,
outputSerdeClass = None,
inputSerdeProps = Seq.empty,
outputSerdeProps = Seq.empty,
recordReaderClass = None,
recordWriterClass = None,
schemaLess = false
)
private val serdeIOSchema = noSerdeIOSchema.copy(
inputSerdeClass = Some(classOf[LazySimpleSerDe].getCanonicalName),
outputSerdeClass = Some(classOf[LazySimpleSerDe].getCanonicalName)
)
test("cat without SerDe") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val rowsDf = Seq("a", "b", "c").map(Tuple1.apply).toDF("a")
checkAnswer(
rowsDf,
(child: SparkPlan) => new ScriptTransformationExec(
input = Seq(rowsDf.col("a").expr),
script = "cat",
output = Seq(AttributeReference("a", StringType)()),
child = child,
ioschema = noSerdeIOSchema
),
rowsDf.collect())
}
test("cat with LazySimpleSerDe") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val rowsDf = Seq("a", "b", "c").map(Tuple1.apply).toDF("a")
checkAnswer(
rowsDf,
(child: SparkPlan) => new ScriptTransformationExec(
input = Seq(rowsDf.col("a").expr),
script = "cat",
output = Seq(AttributeReference("a", StringType)()),
child = child,
ioschema = serdeIOSchema
),
rowsDf.collect())
}
test("script transformation should not swallow errors from upstream operators (no serde)") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val rowsDf = Seq("a", "b", "c").map(Tuple1.apply).toDF("a")
val e = intercept[TestFailedException] {
checkAnswer(
rowsDf,
(child: SparkPlan) => new ScriptTransformationExec(
input = Seq(rowsDf.col("a").expr),
script = "cat",
output = Seq(AttributeReference("a", StringType)()),
child = ExceptionInjectingOperator(child),
ioschema = noSerdeIOSchema
),
rowsDf.collect())
}
assert(e.getMessage().contains("intentional exception"))
}
test("script transformation should not swallow errors from upstream operators (with serde)") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val rowsDf = Seq("a", "b", "c").map(Tuple1.apply).toDF("a")
val e = intercept[TestFailedException] {
checkAnswer(
rowsDf,
(child: SparkPlan) => new ScriptTransformationExec(
input = Seq(rowsDf.col("a").expr),
script = "cat",
output = Seq(AttributeReference("a", StringType)()),
child = ExceptionInjectingOperator(child),
ioschema = serdeIOSchema
),
rowsDf.collect())
}
assert(e.getMessage().contains("intentional exception"))
}
test("SPARK-14400 script transformation should fail for bad script command") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val rowsDf = Seq("a", "b", "c").map(Tuple1.apply).toDF("a")
val e = intercept[SparkException] {
val plan =
new ScriptTransformationExec(
input = Seq(rowsDf.col("a").expr),
script = "some_non_existent_command",
output = Seq(AttributeReference("a", StringType)()),
child = rowsDf.queryExecution.sparkPlan,
ioschema = serdeIOSchema)
SparkPlanTest.executePlan(plan, hiveContext)
}
assert(e.getMessage.contains("Subprocess exited with status"))
}
}
private case class ExceptionInjectingOperator(child: SparkPlan) extends UnaryExecNode {
override protected def doExecute(): RDD[InternalRow] = {
child.execute().map { x =>
assert(TaskContext.get() != null) // Make sure that TaskContext is defined.
Thread.sleep(1000) // This sleep gives the external process time to start.
throw new IllegalArgumentException("intentional exception")
}
}
override def output: Seq[Attribute] = child.output
override def outputPartitioning: Partitioning = child.outputPartitioning
}
|
bravo-zhang/spark
|
sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ScriptTransformationSuite.scala
|
Scala
|
apache-2.0
| 5,601 |
/*
* Copyright 2011-2014 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.ebpi.yaidom.core
import nl.ebpi.yaidom.XmlStringUtils
/**
* Namespace declarations (and undeclarations), typically at the level of one element.
*
* For example, consider the following XML:
* {{{
* <book:Bookstore xmlns:book="http://bookstore/book" xmlns:auth="http://bookstore/author">
* <book:Book ISBN="978-0321356680" Price="35" Edition="2">
* <book:Title>Effective Java (2nd Edition)</book:Title>
* <book:Authors>
* <auth:Author>
* <auth:First_Name>Joshua</auth:First_Name>
* <auth:Last_Name>Bloch</auth:Last_Name>
* </auth:Author>
* </book:Authors>
* </book:Book>
* </book:Bookstore>
* }}}
* Then only the root element contains namespace declarations, viz.:
* {{{
* Declarations.from("book" -> "http://bookstore/book", "auth" -> "http://bookstore/author")
* }}}
*
* The `Declarations` is backed by a map from prefixes (or the empty string for the default namespace) to namespace URIs (or the empty string).
* If the mapped value is the empty string, it is an undeclaration.
*
* Prefix 'xml' is not allowed as key in this map. That prefix, mapping to namespace URI 'http://www.w3.org/XML/1998/namespace',
* is always available, without needing any declaration.
*
* This class does not depend on the `Scope` class.
*
* There are no methods for subset relationships on namespace declarations (unlike for class `Scope`).
* After all, in the presence of namespace undeclarations, such a subset relationship would become a bit unnatural.
*
* @author Chris de Vreeze
*/
final case class Declarations(prefixNamespaceMap: Map[String, String]) extends Immutable {
import Declarations._
validate(prefixNamespaceMap)
/** Returns true if this Declarations is empty. Faster than comparing this Declarations against the empty Declarations. */
def isEmpty: Boolean = prefixNamespaceMap.isEmpty
/** Returns an adapted copy of this Declarations, but retaining only the undeclarations, if any */
def retainingUndeclarations: Declarations = {
val m = prefixNamespaceMap filter { case (pref, ns) => ns == "" }
if (m.isEmpty) Declarations.Empty else Declarations(m)
}
/** Returns an adapted copy of this Declarations, but without any undeclarations, if any */
def withoutUndeclarations: Declarations = {
val m = prefixNamespaceMap filter { case (pref, ns) => ns != "" }
if (m.size == prefixNamespaceMap.size) this else Declarations(m)
}
/** Returns an adapted copy of this Declarations, but retaining only the default namespace, if any */
def retainingDefaultNamespace: Declarations = {
val m = prefixNamespaceMap filter { case (pref, ns) => pref == DefaultNsPrefix }
if (m.isEmpty) Declarations.Empty else Declarations(m)
}
/** Returns an adapted copy of this Declarations, but without the default namespace, if any */
def withoutDefaultNamespace: Declarations = {
if (!prefixNamespaceMap.contains(DefaultNsPrefix)) this else Declarations(prefixNamespaceMap - DefaultNsPrefix)
}
/** Returns `Declarations(this.prefixNamespaceMap ++ declarations.prefixNamespaceMap)` */
def append(declarations: Declarations): Declarations = Declarations(this.prefixNamespaceMap ++ declarations.prefixNamespaceMap)
/** Returns `Declarations(this.prefixNamespaceMap -- prefixes)` */
def minus(prefixes: Set[String]): Declarations = Declarations(this.prefixNamespaceMap -- prefixes)
/** Alias for `append` */
def ++(declarations: Declarations): Declarations = append(declarations)
/** Alias for `minus` */
def --(prefixes: Set[String]): Declarations = minus(prefixes)
/** Creates a `String` representation of this `Declarations`, as it is shown in an XML element */
def toStringInXml: String = {
val declaredString = properDeclarationsToStringInXml
val defaultNamespaceUndeclared: Boolean = prefixNamespaceMap.get(DefaultNsPrefix) == Some("")
val defaultNsUndeclaredString = if (defaultNamespaceUndeclared) """xmlns=""""" else ""
val undeclaredPrefixes: Set[String] = ((prefixNamespaceMap - DefaultNsPrefix) filter (kv => kv._2 == "")).keySet
val undeclaredPrefixesString = undeclaredPrefixes map { pref => """xmlns:%s=""""".format(pref) } mkString (" ")
List(declaredString, defaultNsUndeclaredString, undeclaredPrefixesString) filterNot { _ == "" } mkString (" ")
}
private def properDeclarationsToStringInXml: String = {
val declaredMap = prefixNamespaceMap filter { case (pref, ns) => ns.length > 0 }
val defaultNsString = if (!declaredMap.contains(DefaultNsPrefix)) "" else """xmlns="%s"""".format(declaredMap(DefaultNsPrefix))
val prefixScopeString = (declaredMap - DefaultNsPrefix) map { case (pref, ns) => """xmlns:%s="%s"""".format(pref, ns) } mkString (" ")
List(defaultNsString, prefixScopeString) filterNot { _ == "" } mkString (" ")
}
}
object Declarations {
private def validate(prefixNamespaceMap: Map[String, String]): Unit = {
require(prefixNamespaceMap ne null)
prefixNamespaceMap foreach {
case (pref, ns) =>
require(pref ne null, s"No null prefix allowed in declarations $prefixNamespaceMap")
require(ns ne null, s"No null namespace allowed in declarations $prefixNamespaceMap")
require(
!XmlStringUtils.containsColon(pref),
s"The prefix must not contain any colon in declarations $prefixNamespaceMap")
require(
pref != "xmlns",
s"The prefix must not be 'xmlns' in declarations $prefixNamespaceMap")
require(
pref != "xml",
s"No 'xml' prefix allowed in declarations $prefixNamespaceMap")
require(
ns != "http://www.w3.org/2000/xmlns/",
s"No 'http://www.w3.org/2000/xmlns/' namespace allowed in declarations $prefixNamespaceMap")
require(
ns != XmlNamespace,
s"No 'http://www.w3.org/XML/1998/namespace' namespace allowed in declarations $prefixNamespaceMap")
}
}
/** The "empty" `Declarations` */
val Empty = Declarations(Map())
/**
* Same as the constructor, but removing the 'xml' prefix, if any.
* Therefore this call is easier to use than the constructor or default `apply` method.
*/
def from(m: Map[String, String]): Declarations = {
if (m.contains("xml")) {
require(
m("xml") == XmlNamespace,
"The 'xml' prefix must map to 'http://www.w3.org/XML/1998/namespace'")
}
Declarations(m - "xml")
}
/** Returns `from(Map[String, String](m: _*))` */
def from(m: (String, String)*): Declarations = from(Map[String, String](m: _*))
/** Returns a `Declarations` that contains (only) undeclarations for the given prefixes */
def undeclaring(prefixes: Set[String]): Declarations = {
val m = (prefixes map (pref => (pref -> ""))).toMap
Declarations(m)
}
val DefaultNsPrefix = ""
val XmlNamespace = "http://www.w3.org/XML/1998/namespace"
}
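// Hedged usage sketch (not part of the original yaidom sources): composing the API above,
// reusing the bookstore prefixes from the class documentation.
private[core] object DeclarationsUsageSketch {
  // Declarations for two prefixes, as in the scaladoc example of this file.
  val decls: Declarations =
    Declarations.from("book" -> "http://bookstore/book", "auth" -> "http://bookstore/author")
  // Undeclare the "auth" prefix, then strip the undeclaration again.
  val withAuthUndeclared: Declarations = decls ++ Declarations.undeclaring(Set("auth"))
  val properOnly: Declarations = withAuthUndeclared.withoutUndeclarations
  // Renders as it would appear on an XML element, e.g. xmlns:book="http://bookstore/book".
  val inXml: String = decls.toStringInXml
}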
|
EBPI/yaidom
|
src/main/scala/nl/ebpi/yaidom/core/Declarations.scala
|
Scala
|
apache-2.0
| 7,508 |
package com.twitter.finagle.mysql
import com.twitter.finagle.FailureFlags
import com.twitter.finagle.mysql.transport.Packet
import com.twitter.finagle.transport.Transport
import com.twitter.util.Future
import java.lang.IllegalStateException
import java.nio.charset.StandardCharsets
private object AuthNegotiation {
/**
* Make the `AuthSwitchResponse` with the user's password or a phony
* password to cause a cache miss. We only use a phony password during testing.
*/
private def makeAuthSwitchResponse(
seqNum: Short,
salt: Array[Byte],
authInfo: AuthInfo
): AuthSwitchResponse = {
AuthSwitchResponse(
seqNum,
getPasswordForSwitchResponse(authInfo),
salt,
authInfo.settings.charset,
withSha256 = true
)
}
/**
* Get the password to send in the `AuthSwitchResponse`. This will return either a phony
* password to cause a cache miss during the `caching_sha2_password` authentication
* method, the password from the settings, or `None` if the user doesn't have a password.
* We only use a phony password during testing.
*/
private def getPasswordForSwitchResponse(authInfo: AuthInfo): Option[String] =
authInfo.settings.password match {
// Sending a password when the user doesn't have one set doesn't invalidate the cache,
// instead, the server will throw an error. Here we make sure we only send the wrong password
// to invalidate the cache when the user has a non-null password.
case Some(_) if authInfo.settings.causeAuthCacheMiss =>
Some("wrong-password") // invalidate cache to perform full auth
case None => None // if the user doesn't have the password, don't send a password
case pw => pw
}
/**
* Make the `AuthMoreData` packet that is the response to receiving
* the server's RSA key. Encrypt the password with the RSA key and
* send the password to the server.
*/
private def makeAuthMoreDataWithServersSentRsaKey(
authMoreData: AuthMoreDataFromServer,
authInfo: AuthInfo
): PasswordAuthMoreDataToServer = authMoreData.authData match {
case Some(rsaKey) =>
makeAuthMoreDataWithRsaKeyEncryptedPassword(
authMoreData,
authInfo,
new String(rsaKey, StandardCharsets.UTF_8))
case None =>
throw new NegotiationFailure(
"RSA public key is missing from the AuthMoreData packet sent from the server.")
}
/**
* Make the `AuthMoreData` packet that is sent when TLS is enabled.
* In this case we send the plaintext password to the server over
* an encrypted connection.
*/
private def makeAuthMoreDataWithPlaintextPassword(
authMoreData: AuthMoreDataFromServer,
authInfo: AuthInfo
): PasswordAuthMoreDataToServer = authInfo.settings.password match {
case Some(password) =>
val passwordBytes = PasswordUtils.addNullByteToPassword(
password.getBytes(MysqlCharset(authInfo.settings.charset).displayName))
PasswordAuthMoreDataToServer(
(authMoreData.seqNum + 1).toShort,
PerformFullAuth,
passwordBytes)
case None =>
throw new IllegalStateException(
"Null passwords should complete authentication after sending the AuthSwitchResponse")
}
/**
* Make the `AuthMoreData` packet with the password encrypted
* with the server's RSA public key.
*/
private def makeAuthMoreDataWithRsaKeyEncryptedPassword(
authMoreData: AuthMoreDataFromServer,
authInfo: AuthInfo,
rsaKey: String
): PasswordAuthMoreDataToServer = authInfo.settings.password match {
case Some(password) =>
PasswordAuthMoreDataToServer(
(authMoreData.seqNum + 1).toShort,
NeedPublicKey,
PasswordUtils.encryptPasswordWithRsaPublicKey(
password,
rsaKey,
authInfo.salt,
authInfo.settings.charset,
authInfo.serverVersion)
)
case None =>
throw new IllegalStateException(
"Null passwords should complete authentication after sending the AuthSwitchResponse")
}
/**
* Request the server's RSA public key.
*/
private def makePublicKeyRequestToServer(
authMoreData: AuthMoreDataFromServer
): PlainAuthMoreDataToServer =
PlainAuthMoreDataToServer((authMoreData.seqNum + 1).toShort, NeedPublicKey)
/**
* The exception that is thrown if something goes awry during the authentication process.
* This exception has the [[FailureFlags.NonRetryable]] flag because this error is
* thrown only in cases when the server is sent bad authentication information, or the
* server sends the client bad authentication information.
*/
private class NegotiationFailure(
message: String,
caughtException: Throwable,
val flags: Long)
extends Exception(
s"Failed to authenticate the client with the MySQL server. $message",
caughtException)
with FailureFlags[NegotiationFailure] {
def this(caughtException: Throwable) = this("", caughtException, FailureFlags.NonRetryable)
def this(message: String) = this(message, null, FailureFlags.NonRetryable)
protected def copyWithFlags(flags: Long): NegotiationFailure =
new NegotiationFailure(message, caughtException, flags)
}
private sealed trait State
private object State {
case class Init(msg: HandshakeResponse, info: AuthInfo) extends State
case class Switch(msg: AuthSwitchRequest, info: AuthInfo) extends State
case class MoreData(msg: AuthMoreDataFromServer, info: AuthInfo) extends State
}
}
/**
* The class that handles negotiating authentication. Both the `mysql_native_password`
 * and the `caching_sha2_password` auth methods terminate here, though
 * `mysql_native_password` terminates in [[com.twitter.finagle.mysql.AuthNegotiation.State.Init]]
 * whereas `caching_sha2_password` terminates in either the [[com.twitter.finagle.mysql.AuthNegotiation.State.Switch]]
* or [[com.twitter.finagle.mysql.AuthNegotiation.State.MoreData]] state.
*
* Authentication happens during the connection phase, which means [[doAuth()]] should
* be called during the handshake after receiving the InitialHandshake packet from the Server.
*
* @param transport the [[Transport]] used to send messages
* @param resultDecoder the decoder to use to decode the messages
*/
private class AuthNegotiation(
transport: Transport[Packet, Packet],
resultDecoder: Packet => Future[Result]) {
import AuthNegotiation._
/**
* Start the authentication process.
*
* @param initMessage the message to send to the server
* @param initAuthInfo extra information needed at every step
*/
def doAuth(initMessage: HandshakeResponse, initAuthInfo: AuthInfo): Future[Result] = step(
State.Init(initMessage, initAuthInfo))
/**
* Dispatch a message, then read and return the result.
*/
private def dispatch(msg: ProtocolMessage): Future[Result] =
transport
.write(msg.toPacket)
.before(transport.read())
.flatMap(resultDecoder)
/**
* The state machine that determines which sends the correct message
* depending on the state (Init, Switch, or MoreData) it is passed.
*/
private def step(state: State): Future[Result] = state match {
// dispatch(Init Message) -> AuthSwitchRequest | <terminate>
case State.Init(msg, info) =>
dispatch(msg).flatMap {
// Change state to Switch.
case res: AuthSwitchRequest => step(State.Switch(res, info))
// Or terminate the state machine with OK, Error, or an Exception.
case ok: OK => Future.value(ok)
case error: Error => Future.value(error)
case m =>
Future.exception(
new NegotiationFailure(s"Unrecognized or unexpected message from server: $m"))
}
// dispatch(AuthSwitchResponse) -> AuthMoreData | <terminate>
case State.Switch(msg, info) =>
val req = makeAuthSwitchResponse((msg.seqNum + 1).toShort, msg.pluginData, info)
dispatch(req).flatMap {
// Change state to MoreData.
case res: AuthMoreDataFromServer =>
val nextInfo = info.copy(salt = Some(msg.pluginData))
step(State.MoreData(res, nextInfo))
// Or terminate the state machine with OK, Error, or an Exception.
case ok: OK => Future.value(ok)
case error: Error => Future.value(error)
case m =>
Future.exception(
new NegotiationFailure(s"Unrecognized or unexpected message from server: $m"))
}
// AuthMoreData -> AuthMoreData | <terminate>
case State.MoreData(msg, info) =>
val nextState: Result => Future[Result] = {
// Stay in the MoreData state.
case more: AuthMoreDataFromServer => step(State.MoreData(more, info))
// Or terminate the state machine with OK, Error, or an Exception.
case ok: OK => Future.value(ok)
case error: Error => Future.value(error)
case m =>
Future.exception(
new NegotiationFailure(s"Unrecognized or unexpected message from server: $m"))
}
// The server sends three AuthMoreDataTypes, and the PerformFullAuth
      // type is handled differently depending on whether TLS is enabled.
// If TLS is not enabled, then we perform full auth with the server's
// RSA public key.
msg.moreDataType match {
// The user is already cached in the server so we get a fast auth success.
case FastAuthSuccess =>
info.fastAuthSuccessCounter.incr()
transport.read().flatMap(resultDecoder) // Server sends separate OK packet
// We previously sent the server the request for the RSA public key
// This AuthMoreData packet contains the server's public key.
case NeedPublicKey =>
dispatch(makeAuthMoreDataWithServersSentRsaKey(msg, info)).flatMap(nextState)
// When TLS is enabled, we send the password as plaintext.
case PerformFullAuth if info.tlsEnabled =>
dispatch(makeAuthMoreDataWithPlaintextPassword(msg, info)).flatMap(nextState)
// When TLS is not enabled we either request the RSA public key from the
// server or send the AuthMoreData packet with the password encrypted with
// the locally stored RSA public key of the server. We determine if we need
// to send the request for the RSA public key to the server by checking if a
// path to the locally stored key is provided through the
// PathToServerRsaPublicKey param.
case PerformFullAuth if !info.tlsEnabled =>
if (info.settings.pathToServerRsaPublicKey.nonEmpty) {
val rsaKey = PasswordUtils.readFromPath(info.settings.pathToServerRsaPublicKey)
val req = makeAuthMoreDataWithRsaKeyEncryptedPassword(msg, info, rsaKey)
dispatch(req).flatMap(nextState)
} else {
// Public key unknown to client, request the public key from the server.
dispatch(makePublicKeyRequestToServer(msg)).flatMap(nextState)
}
}
}
}
|
twitter/finagle
|
finagle-mysql/src/main/scala/com/twitter/finagle/mysql/AuthNegotiation.scala
|
Scala
|
apache-2.0
| 11,037 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution.command
import org.apache.spark.metrics.source.HiveCatalogMetrics
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.connector.catalog.CatalogManager
import org.apache.spark.sql.hive.test.TestHiveSingleton
/**
 * The trait contains settings and utility functions. It can be mixed into test suites for
 * the datasource v1 Hive external catalog. This trait complements the common trait
* `org.apache.spark.sql.execution.command.DDLCommandTestUtils` with utility functions and
* settings for all unified datasource V1 and V2 test suites.
*/
trait CommandSuiteBase extends TestHiveSingleton {
def version: String = "Hive V1" // The prefix is added to test names
def catalog: String = CatalogManager.SESSION_CATALOG_NAME
def defaultUsing: String = "USING HIVE" // The clause is used in creating tables under testing
def checkLocation(
t: String,
spec: TablePartitionSpec,
expected: String): Unit = {
val tablePath = t.split('.')
val tableName = tablePath.last
val ns = tablePath.init.mkString(".")
val partSpec = spec.map { case (key, value) => s"$key = $value"}.mkString(", ")
val information =
spark.sql(s"SHOW TABLE EXTENDED IN $ns LIKE '$tableName' PARTITION($partSpec)")
.select("information")
.first().getString(0)
    val location = information.split("\\r?\\n").filter(_.startsWith("Location:")).head
assert(location.endsWith(expected))
}
def checkHiveClientCalls[T](expected: Int)(f: => T): Unit = {
HiveCatalogMetrics.reset()
assert(HiveCatalogMetrics.METRIC_HIVE_CLIENT_CALLS.getCount === 0)
f
assert(HiveCatalogMetrics.METRIC_HIVE_CLIENT_CALLS.getCount === expected)
}
}
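// Hedged usage sketch (not part of the original Spark sources): a suite mixing in this trait
// can assert how many Hive client calls a command issues, for example:
//   checkHiveClientCalls(expected = 1) { spark.sql("DROP TABLE IF EXISTS t") }
// The expected count and the SQL text are illustrative assumptions only.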
|
wangmiao1981/spark
|
sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/CommandSuiteBase.scala
|
Scala
|
apache-2.0
| 2,577 |
package mesosphere.marathon
import java.util.concurrent.atomic.AtomicBoolean
import java.util.{ Timer, TimerTask }
import akka.actor.{ ActorRef, ActorSystem }
import akka.event.EventStream
import akka.testkit.{ TestKit, TestProbe }
import com.codahale.metrics.MetricRegistry
import com.twitter.common.base.ExceptionalCommand
import com.twitter.common.zookeeper.Group.JoinException
import com.twitter.common.zookeeper.{ Candidate, Group }
import mesosphere.chaos.http.HttpConf
import mesosphere.marathon.Protos.StorageVersion
import mesosphere.marathon.core.leadership.LeadershipCoordinator
import mesosphere.marathon.health.HealthCheckManager
import mesosphere.marathon.metrics.Metrics
import mesosphere.marathon.state.{ AppRepository, MarathonStore, Migration }
import mesosphere.marathon.tasks.TaskTracker
import mesosphere.util.state.memory.InMemoryStore
import mesosphere.util.state.{ FrameworkId, FrameworkIdUtil }
import org.apache.mesos.{ Protos => mesos, SchedulerDriver }
import org.mockito.Matchers.{ any, eq => mockEq }
import org.mockito.Mockito
import org.mockito.Mockito.{ times, verify, when }
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.rogach.scallop.ScallopOption
import org.scalatest.{ BeforeAndAfterAll, Matchers }
import scala.concurrent.Future
import scala.concurrent.duration._
object MarathonSchedulerServiceTest {
import Mockito.mock
val ReconciliationDelay = 5000L
val ReconciliationInterval = 5000L
val ScaleAppsDelay = 4000L
val ScaleAppsInterval = 4000L
val MaxActorStartupTime = 5000L
def mockConfig = {
val config = mock(classOf[MarathonConf])
when(config.reconciliationInitialDelay).thenReturn(scallopOption(Some(ReconciliationDelay)))
when(config.reconciliationInterval).thenReturn(scallopOption(Some(ReconciliationInterval)))
when(config.scaleAppsInitialDelay).thenReturn(scallopOption(Some(ScaleAppsDelay)))
when(config.scaleAppsInterval).thenReturn(scallopOption(Some(ScaleAppsInterval)))
when(config.zkTimeoutDuration).thenReturn(1.second)
when(config.maxActorStartupTime).thenReturn(scallopOption(Some(MaxActorStartupTime)))
config
}
def scallopOption[A](a: Option[A]): ScallopOption[A] = {
new ScallopOption[A]("") {
override def get = a
override def apply() = a.get
}
}
}
class MarathonSchedulerServiceTest
extends TestKit(ActorSystem("System"))
with MarathonSpec
with BeforeAndAfterAll
with Matchers {
import MarathonSchedulerServiceTest._
import system.dispatcher
private[this] var probe: TestProbe = _
private[this] var leadershipCoordinator: LeadershipCoordinator = _
private[this] var healthCheckManager: HealthCheckManager = _
private[this] var candidate: Option[Candidate] = _
private[this] var config: MarathonConf = _
private[this] var httpConfig: HttpConf = _
private[this] var frameworkIdUtil: FrameworkIdUtil = _
private[this] var leader: AtomicBoolean = _
private[this] var appRepository: AppRepository = _
private[this] var taskTracker: TaskTracker = _
private[this] var scheduler: MarathonScheduler = _
private[this] var migration: Migration = _
private[this] var schedulerActor: ActorRef = _
private[this] var events: EventStream = _
before {
probe = TestProbe()
leadershipCoordinator = mock[LeadershipCoordinator]
healthCheckManager = mock[HealthCheckManager]
candidate = mock[Option[Candidate]]
config = mockConfig
httpConfig = mock[HttpConf]
frameworkIdUtil = mock[FrameworkIdUtil]
leader = mock[AtomicBoolean]
appRepository = mock[AppRepository]
taskTracker = mock[TaskTracker]
scheduler = mock[MarathonScheduler]
migration = mock[Migration]
schedulerActor = probe.ref
events = new EventStream()
}
def driverFactory[T](provide: => SchedulerDriver): SchedulerDriverFactory = {
new SchedulerDriverFactory {
override def createDriver(): SchedulerDriver = provide
}
}
test("Start timer when elected") {
val mockTimer = mock[Timer]
when(frameworkIdUtil.fetch()).thenReturn(None)
val schedulerService = new MarathonSchedulerService(
leadershipCoordinator,
healthCheckManager,
candidate,
config,
frameworkIdUtil,
leader,
appRepository,
taskTracker,
driverFactory(mock[SchedulerDriver]),
system,
migration,
schedulerActor,
events
) {
override def runDriver(abdicateCmdOption: Option[ExceptionalCommand[JoinException]]): Unit = ()
}
schedulerService.timer = mockTimer
when(leadershipCoordinator.prepareForStart()).thenReturn(Future.successful(()))
schedulerService.onElected(mock[ExceptionalCommand[Group.JoinException]])
verify(mockTimer).schedule(any[TimerTask](), mockEq(ReconciliationDelay), mockEq(ReconciliationInterval))
verify(mockTimer).schedule(any(), mockEq(ReconciliationDelay + ReconciliationInterval))
}
test("Cancel timer when defeated") {
val mockTimer = mock[Timer]
when(frameworkIdUtil.fetch()).thenReturn(None)
val schedulerService = new MarathonSchedulerService(
leadershipCoordinator,
healthCheckManager,
candidate,
config,
frameworkIdUtil,
leader,
appRepository,
taskTracker,
driverFactory(mock[SchedulerDriver]),
system,
migration,
schedulerActor,
events
) {
override def runDriver(abdicateCmdOption: Option[ExceptionalCommand[JoinException]]): Unit = ()
}
schedulerService.timer = mockTimer
schedulerService.onDefeated()
verify(mockTimer).cancel()
assert(schedulerService.timer != mockTimer, "Timer should be replaced after leadership defeat")
}
test("Re-enable timer when re-elected") {
val mockTimer = mock[Timer]
when(frameworkIdUtil.fetch()).thenReturn(None)
val schedulerService = new MarathonSchedulerService(
leadershipCoordinator,
healthCheckManager,
candidate,
config,
frameworkIdUtil,
leader,
appRepository,
taskTracker,
driverFactory(mock[SchedulerDriver]),
system,
migration,
schedulerActor,
events
) {
override def runDriver(abdicateCmdOption: Option[ExceptionalCommand[JoinException]]): Unit = ()
override def newTimer() = mockTimer
}
when(leadershipCoordinator.prepareForStart()).thenReturn(Future.successful(()))
schedulerService.onElected(mock[ExceptionalCommand[Group.JoinException]])
schedulerService.onDefeated()
schedulerService.onElected(mock[ExceptionalCommand[Group.JoinException]])
verify(mockTimer, times(2)).schedule(any(), mockEq(ScaleAppsDelay), mockEq(ScaleAppsInterval))
verify(mockTimer, times(2)).schedule(any[TimerTask](), mockEq(ReconciliationDelay), mockEq(ReconciliationInterval))
verify(mockTimer, times(2)).schedule(any(), mockEq(ReconciliationDelay + ReconciliationInterval))
verify(mockTimer).cancel()
}
test("Always fetch current framework ID") {
val frameworkId = mesos.FrameworkID.newBuilder.setValue("myId").build()
val mockTimer = mock[Timer]
val metrics = new Metrics(new MetricRegistry)
val store = new MarathonStore[FrameworkId](new InMemoryStore, metrics, () => new FrameworkId(""))
frameworkIdUtil = new FrameworkIdUtil(store, Duration.Inf)
val schedulerService = new MarathonSchedulerService(
leadershipCoordinator,
healthCheckManager,
candidate,
config,
frameworkIdUtil,
leader,
appRepository,
taskTracker,
driverFactory(mock[SchedulerDriver]),
system,
migration,
schedulerActor,
events
) {
override def runDriver(abdicateCmdOption: Option[ExceptionalCommand[JoinException]]): Unit = ()
override def newTimer() = mockTimer
}
schedulerService.frameworkId should be(None)
implicit lazy val timeout = 1.second
frameworkIdUtil.store(frameworkId)
awaitAssert(schedulerService.frameworkId should be(Some(frameworkId)))
}
test("Abdicate leadership when migration fails and reoffer leadership") {
when(frameworkIdUtil.fetch()).thenReturn(None)
candidate = Some(mock[Candidate])
val schedulerService = new MarathonSchedulerService(
leadershipCoordinator,
healthCheckManager,
candidate,
config,
frameworkIdUtil,
leader,
appRepository,
taskTracker,
driverFactory(mock[SchedulerDriver]),
system,
migration,
schedulerActor,
events
) {
override def runDriver(abdicateCmdOption: Option[ExceptionalCommand[JoinException]]): Unit = ()
}
    // use an Answer object here because Mockito's thenThrow only allows
    // RuntimeExceptions to be thrown
when(migration.migrate()).thenAnswer(new Answer[StorageVersion] {
override def answer(invocation: InvocationOnMock): StorageVersion = {
import java.util.concurrent.TimeoutException
throw new TimeoutException("Failed to wait for future within timeout")
}
})
schedulerService.onElected(mock[ExceptionalCommand[Group.JoinException]])
awaitAssert { verify(candidate.get).offerLeadership(schedulerService) }
leader.get() should be (false)
}
test("Abdicate leadership when the driver creation fails by some exception") {
when(frameworkIdUtil.fetch()).thenReturn(None)
candidate = Some(mock[Candidate])
val driverFactory = mock[SchedulerDriverFactory]
val schedulerService = new MarathonSchedulerService(
leadershipCoordinator,
healthCheckManager,
candidate,
config,
frameworkIdUtil,
leader,
appRepository,
taskTracker,
driverFactory,
system,
migration,
schedulerActor,
events
) {
override def runDriver(abdicateCmdOption: Option[ExceptionalCommand[JoinException]]): Unit = ()
}
when(leadershipCoordinator.prepareForStart()).thenReturn(Future.successful(()))
when(driverFactory.createDriver()).thenThrow(new Exception("Some weird exception"))
schedulerService.onElected(mock[ExceptionalCommand[Group.JoinException]])
verify(candidate.get, Mockito.timeout(1000)).offerLeadership(schedulerService)
leader.get() should be (false)
}
test("Abdicate leadership when prepareStart throws an exception") {
when(frameworkIdUtil.fetch()).thenReturn(None)
candidate = Some(mock[Candidate])
val driverFactory = mock[SchedulerDriverFactory]
val schedulerService = new MarathonSchedulerService(
leadershipCoordinator,
healthCheckManager,
candidate,
config,
frameworkIdUtil,
leader,
appRepository,
taskTracker,
driverFactory,
system,
migration,
schedulerActor,
events
) {
override def runDriver(abdicateCmdOption: Option[ExceptionalCommand[JoinException]]): Unit = ()
}
when(leadershipCoordinator.prepareForStart()).thenReturn(Future.failed(new RuntimeException("fail")))
when(driverFactory.createDriver()).thenReturn(mock[SchedulerDriver])
schedulerService.onElected(mock[ExceptionalCommand[Group.JoinException]])
verify(candidate.get, Mockito.timeout(1000)).offerLeadership(schedulerService)
leader.get() should be (false)
}
}
|
MrMarvin/marathon
|
src/test/scala/mesosphere/marathon/MarathonSchedulerServiceTest.scala
|
Scala
|
apache-2.0
| 11,326 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fetch
import java.net.URL
import services.model.StatusLogger
import uk.gov.hmrc.logging.Stdout
// for manual test/development
object SardineWrapperSketch {
def main(args: Array[String]) {
val status = new StatusLogger(Stdout)
if (args.length > 2) {
val finder = new SardineWrapper(new URL(args(0)), args(1), args(2), None, new SardineFactory2)
val top = finder.exploreRemoteTree
Stdout.info(top.toString)
} else if (args.length > 0) {
val finder = new SardineWrapper(new URL(args(0)), "", "", None, new SardineFactory2)
val top = finder.exploreRemoteTree
Stdout.info(top.toString)
}
}
}
|
andywhardy/address-reputation-ingester
|
test/unit/fetch/SardineWrapperSketch.scala
|
Scala
|
apache-2.0
| 1,260 |
#!/bin/bash
exec ${SCALA_HOME}/bin/scala "$0" "$@" 2>&1
!#
import sfc.board._
/*
* @author [email protected]
*/
object `setter-for-catan` {
def main(args: Array[String]) {
args.length match {
case 1 => {
// TODO: use graphics to display board
println(args(0) match {
case "small" => SmallBoard.board
case "small-spiral" => SmallSpiralBoard.board
case "small-traders-and-barbarians" => SmallTradersAndBarbariansBoard.board
case "small-traders-and-barbarians-spiral" => SmallTradersAndBarbariansSpiralBoard.board
})
}
case 2 => {
if (args(1) == "count") {
val probability = args(0) match {
case "small" => SmallBoard.probability
case "small-spiral" => SmallSpiralBoard.probability
case "small-traders-and-barbarians" => SmallTradersAndBarbariansBoard.probability
case "small-traders-and-barbarians-spiral" => SmallTradersAndBarbariansSpiralBoard.probability
}
println (probability)
}
}
}
}
}
|
noel-yap/setter-for-catan
|
setter-for-catan.scala
|
Scala
|
apache-2.0
| 1,083 |
package com.github.novamage.svalidator.validation
/** Contains an error message and any values to be formatted with it
*
* @param messageKey Raw error message or a key string to be used in localization files
* @param messageFormatValues Arguments to interpolate into the string.
*/
case class MessageParts(messageKey: String,
messageFormatValues: List[Any] = Nil) {
/** Returns the message key of this object formatted alongside the format values using
* [[scala.collection.immutable.StringLike#format StringLike.format]]'s method.
*/
def message: String = messageKey.format(messageFormatValues: _*)
/** Returns new parts whose messageKey is generated applying the localizer to the current message key.
*
* Message format values are not altered in any way.
*
* @param localizer Localizer to apply to the messageKey
*/
def localize(implicit localizer: Localizer): MessageParts = {
MessageParts(localizer.localize(messageKey), messageFormatValues)
}
}
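// Hedged usage sketch (not part of the original SValidator sources): the format values are
// interpolated into the message key by `message`. The key and values below are illustrative.
private[validation] object MessagePartsUsageSketch {
  val parts: MessageParts = MessageParts("%s must be at least %d", List("age", 18))
  // Formats to "age must be at least 18".
  val formatted: String = parts.message
}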
|
NovaMage/SValidator
|
src/main/scala/com/github/novamage/svalidator/validation/MessageParts.scala
|
Scala
|
mit
| 1,028 |
package leibniz.inhabitance
//import cats.~>
import leibniz.variance.{Constant, Injective}
import leibniz.{=!=, WeakApart}
sealed abstract class PartiallyInhabited[F[_]] {
import PartiallyInhabited._
/**
* A positive type argument. `F[Positive]` is inhabited.
*/
type Positive
/**
* A negative type argument. `F[Negative]` is uninhabited.
*/
type Negative
/**
* A proof that `F[Positive]` is inhabited.
*/
def positive: Inhabited[F[Positive]]
/**
* A proof that `F[Negative]` is uninhabited.
*/
def negative: Uninhabited[F[Negative]]
/**
* Since `F[Positive]` is inhabited, and `F[Negative]` is uninhabited,
* an equality between [[Positive]] and [[Negative]] would lead to
* a contradiction.
*/
def unequal: Positive =!= Negative = WeakApart.witness(p =>
p.subst[λ[x => Inhabited[F[x]]]](positive).notUninhabited(negative))
/**
* Partially inhabited type constructors are necessarily injective.
*/
def injective: Injective[F] =
Injective.witness3[F, λ[x => x], Positive, Negative](positive, negative)
/**
* Given two total natural transformations `F ~> G` and `G ~> F`,
* we can transform a positive (inhabited) value `F[Positive]` into
* `G[Positive]` and negative `F[Negative]` into `G[Negative]`.
* This implies that [[G]] is partially inhabited as well.
*/
// def imap[G[_]](to: F ~> G, from: G ~> F): PartiallyInhabited[G] =
// witness[G, Positive, Negative](positive.map(to.apply), negative.contramap(from.apply))
def notTotallyInhabited(t: TotallyInhabited[F]): Void =
negative.notInhabited(t.proof[Negative])
def notTotallyUninhabited(t: TotallyUninhabited[F]): Void =
positive.notUninhabited(t.proof[Positive])
def notConstant(F: Constant[F]): Void =
F[Positive, Negative].subst(positive).notUninhabited(negative)
}
object PartiallyInhabited {
final class Witness[F[_], P, N](P: Inhabited[F[P]], N: Uninhabited[F[N]]) extends PartiallyInhabited[F] {
type Positive = P
type Negative = N
def positive: Inhabited[F[Positive]] = P
def negative: Uninhabited[F[Negative]] = N
}
/**
* Witness that [[F]] is a partially inhabited type constructor by providing
* evidence that `F[P]` is inhabited and `F[N]` is uninhabited.
*/
def witness[F[_], P, N](P: Inhabited[F[P]], N: Uninhabited[F[N]]) =
new Witness[F, P, N](P, N)
}
|
alexknvl/leibniz
|
src/main/scala/leibniz/inhabitance/PartiallyInhabited.scala
|
Scala
|
mit
| 2,414 |
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.expecty
import language.experimental.macros
// should have two type parameters; one for recorded type, another for returned type
// so far failed to implement the macro side of this
abstract class Recorder {
val listener: RecorderListener[Boolean]
def apply(recording: Boolean): Boolean = macro RecorderMacro.apply
}
|
pniederw/expecty
|
src/main/scala/org/expecty/Recorder.scala
|
Scala
|
apache-2.0
| 932 |
object MergeIntervals extends App {
def time[R](block: => R): R = {
val t0 = System.currentTimeMillis()
val result = block // call-by-name
val t1 = System.currentTimeMillis()
println("Elapsed time: " + (t1 - t0) + "ms")
result
}
implicit val orderRanges = Ordering.by {range: Range => range.head}
implicit class RangeOps(a: Range) {
def overlaps(b: Range): Boolean = {
a.last >= b.head
}
def merge(b: Range): Range = {
val start = Math.min(a.head, b.head)
val end = Math.max(a.last, b.last)
start to end
}
}
def mergeIntervals(input: Vector[Range]): Vector[Range] = {
input.sorted.foldLeft[Vector[Range]](Vector())((acc, next) => {
acc match {
case ranges :+ lastRange if lastRange.overlaps(next) => ranges :+ lastRange.merge(next)
case _ => acc :+ next
}
})
}
val input = Vector(1 to 3, 2 to 6, 8 to 10, 8 to 10, 15 to 18)
val output = time {
mergeIntervals(input)
}
assert(output == Vector(1 to 6, 8 to 10, 15 to 18))
}
|
marcosfede/algorithms
|
array/merge_intervals/merge_intervals.scala
|
Scala
|
gpl-3.0
| 1,050 |
package pl.arapso.scaffoldings.scala.custom.ip
import java.io.{BufferedWriter, File, FileWriter}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import scala.util.Random
object App {
val MinSize: Int = 1 * 1024
val MaxSize: Int = 10 * 1024
def main(args: Array[String]) {
for(i <- 1 to 100) genIpFile(i)
}
def genIpFile(no: Int) = {
val maxFileSize = fileSize
println(s"Bytes to write $maxFileSize")
val writer = new IpFileWriter(f"/tmp/scala/IpGenerator/part-$no%06d.txt", maxFileSize)
while(writer << IpGenerator.genIp){}
writer.close
}
def fileSize(): Long = {
scala.util.Random.nextInt(MaxSize - MinSize) + MinSize
}
}
object IpGenerator {
private val UpperBoundIpOctetNumber = 256
def genIp: String = {
for(_ <- 1 to 4)
yield Random.nextInt(UpperBoundIpOctetNumber).toString
}.mkString(".")
}
// Writes generated IP addresses to the given file, one per line, until roughly
// maxFileSize bytes have been written.
class IpFileWriter(filePath: String, maxFileSize: Long) {
  private val tempFile = new File(filePath)
  private val fileWriter: BufferedWriter = create
  private var wroteBytes: Long = 0L
def create = {
val dir = new File(tempFile.getParent)
if(!dir.exists) dir.mkdirs()
new BufferedWriter(new FileWriter(tempFile))
}
def << (ip: String): Boolean = {
fileWriter.append(ip)
fileWriter.newLine()
wroteBytes += ip.getBytes.length + 1
wroteBytes < maxFileSize
}
def close = {
fileWriter.close()
}
def addFileSizeToFileName = {
val newFilePrefix = filePath.substring(0, filePath.lastIndexOf("."))
val newFileExtension = filePath.substring(filePath.lastIndexOf("."), filePath.length)
val oldFile = new File(filePath)
val newFile = new File(s"$newFilePrefix-$wroteBytes$newFileExtension")
oldFile.renameTo(newFile)
}
}
|
arapso-scaffoldings/scala
|
scala-tutor/custom/src/main/scala/pl/arapso/scaffoldings/scala/custom/ip/IpGenerator.scala
|
Scala
|
apache-2.0
| 1,827 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.SparkException
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.codegen.{UnsafeArrayWriter, UnsafeRowWriter, UnsafeWriter}
import org.apache.spark.sql.catalyst.util.ArrayData
import org.apache.spark.sql.types.{UserDefinedType, _}
import org.apache.spark.unsafe.Platform
/**
* An interpreted unsafe projection. This class reuses the [[UnsafeRow]] it produces, a consumer
* should copy the row if it is being buffered. This class is not thread safe.
*
* @param expressions that produces the resulting fields. These expressions must be bound
* to a schema.
*/
class InterpretedUnsafeProjection(expressions: Array[Expression]) extends UnsafeProjection {
import InterpretedUnsafeProjection._
/** Number of (top level) fields in the resulting row. */
private[this] val numFields = expressions.length
  /** Array that holds the expression results. */
private[this] val values = new Array[Any](numFields)
/** The row representing the expression results. */
private[this] val intermediate = new GenericInternalRow(values)
/* The row writer for UnsafeRow result */
private[this] val rowWriter = new UnsafeRowWriter(numFields, numFields * 32)
/** The writer that writes the intermediate result to the result row. */
private[this] val writer: InternalRow => Unit = {
val baseWriter = generateStructWriter(
rowWriter,
expressions.map(e => StructField("", e.dataType, e.nullable)))
if (!expressions.exists(_.nullable)) {
// No nullable fields. The top-level null bit mask will always be zeroed out.
baseWriter
} else {
// Zero out the null bit mask before we write the row.
row => {
rowWriter.zeroOutNullBytes()
baseWriter(row)
}
}
}
override def initialize(partitionIndex: Int): Unit = {
expressions.foreach(_.foreach {
case n: Nondeterministic => n.initialize(partitionIndex)
case _ =>
})
}
override def apply(row: InternalRow): UnsafeRow = {
// Put the expression results in the intermediate row.
var i = 0
while (i < numFields) {
values(i) = expressions(i).eval(row)
i += 1
}
// Write the intermediate row to an unsafe row.
rowWriter.reset()
writer(intermediate)
rowWriter.getRow()
}
}
/**
* Helper functions for creating an [[InterpretedUnsafeProjection]].
*/
object InterpretedUnsafeProjection {
/**
* Returns an [[UnsafeProjection]] for given sequence of bound Expressions.
*/
def createProjection(exprs: Seq[Expression]): UnsafeProjection = {
// We need to make sure that we do not reuse stateful expressions.
val cleanedExpressions = exprs.map(_.transform {
case s: Stateful => s.freshCopy()
})
new InterpretedUnsafeProjection(cleanedExpressions.toArray)
}
/**
* Generate a struct writer function. The generated function writes an [[InternalRow]] to the
* given buffer using the given [[UnsafeRowWriter]].
*/
private def generateStructWriter(
rowWriter: UnsafeRowWriter,
fields: Array[StructField]): InternalRow => Unit = {
val numFields = fields.length
// Create field writers.
val fieldWriters = fields.map { field =>
generateFieldWriter(rowWriter, field.dataType, field.nullable)
}
// Create basic writer.
row => {
var i = 0
while (i < numFields) {
fieldWriters(i).apply(row, i)
i += 1
}
}
}
/**
* Generate a writer function for a struct field, array element, map key or map value. The
* generated function writes the element at an index in a [[SpecializedGetters]] object (row
* or array) to the given buffer using the given [[UnsafeWriter]].
*/
private def generateFieldWriter(
writer: UnsafeWriter,
dt: DataType,
nullable: Boolean): (SpecializedGetters, Int) => Unit = {
// Create the basic writer.
val unsafeWriter: (SpecializedGetters, Int) => Unit = dt match {
case BooleanType =>
(v, i) => writer.write(i, v.getBoolean(i))
case ByteType =>
(v, i) => writer.write(i, v.getByte(i))
case ShortType =>
(v, i) => writer.write(i, v.getShort(i))
case IntegerType | DateType =>
(v, i) => writer.write(i, v.getInt(i))
case LongType | TimestampType =>
(v, i) => writer.write(i, v.getLong(i))
case FloatType =>
(v, i) => writer.write(i, v.getFloat(i))
case DoubleType =>
(v, i) => writer.write(i, v.getDouble(i))
case DecimalType.Fixed(precision, scale) =>
(v, i) => writer.write(i, v.getDecimal(i, precision, scale), precision, scale)
case CalendarIntervalType =>
(v, i) => writer.write(i, v.getInterval(i))
case BinaryType =>
(v, i) => writer.write(i, v.getBinary(i))
case StringType =>
(v, i) => writer.write(i, v.getUTF8String(i))
case StructType(fields) =>
val numFields = fields.length
val rowWriter = new UnsafeRowWriter(writer, numFields)
val structWriter = generateStructWriter(rowWriter, fields)
(v, i) => {
v.getStruct(i, fields.length) match {
case row: UnsafeRow =>
writer.write(i, row)
case row =>
val previousCursor = writer.cursor()
// Nested struct. We don't know where this will start because a row can be
// variable length, so we need to update the offsets and zero out the bit mask.
rowWriter.resetRowWriter()
structWriter.apply(row)
writer.setOffsetAndSizeFromPreviousCursor(i, previousCursor)
}
}
case ArrayType(elementType, containsNull) =>
val arrayWriter = new UnsafeArrayWriter(writer, getElementSize(elementType))
val elementWriter = generateFieldWriter(
arrayWriter,
elementType,
containsNull)
(v, i) => {
val previousCursor = writer.cursor()
writeArray(arrayWriter, elementWriter, v.getArray(i))
writer.setOffsetAndSizeFromPreviousCursor(i, previousCursor)
}
case MapType(keyType, valueType, valueContainsNull) =>
val keyArrayWriter = new UnsafeArrayWriter(writer, getElementSize(keyType))
val keyWriter = generateFieldWriter(
keyArrayWriter,
keyType,
nullable = false)
val valueArrayWriter = new UnsafeArrayWriter(writer, getElementSize(valueType))
val valueWriter = generateFieldWriter(
valueArrayWriter,
valueType,
valueContainsNull)
(v, i) => {
v.getMap(i) match {
case map: UnsafeMapData =>
writer.write(i, map)
case map =>
val previousCursor = writer.cursor()
// preserve 8 bytes to write the key array numBytes later.
valueArrayWriter.grow(8)
valueArrayWriter.increaseCursor(8)
// Write the keys and write the numBytes of key array into the first 8 bytes.
writeArray(keyArrayWriter, keyWriter, map.keyArray())
Platform.putLong(
valueArrayWriter.getBuffer,
previousCursor,
valueArrayWriter.cursor - previousCursor - 8
)
// Write the values.
writeArray(valueArrayWriter, valueWriter, map.valueArray())
writer.setOffsetAndSizeFromPreviousCursor(i, previousCursor)
}
}
case udt: UserDefinedType[_] =>
generateFieldWriter(writer, udt.sqlType, nullable)
case NullType =>
(_, _) => {}
case _ =>
throw new SparkException(s"Unsupported data type $dt")
}
// Always wrap the writer with a null safe version.
dt match {
case _: UserDefinedType[_] =>
// The null wrapper depends on the sql type and not on the UDT.
unsafeWriter
case DecimalType.Fixed(precision, _) if precision > Decimal.MAX_LONG_DIGITS =>
// We can't call setNullAt() for DecimalType with precision larger than 18, we call write
// directly. We can use the unwrapped writer directly.
unsafeWriter
case BooleanType | ByteType =>
(v, i) => {
if (!v.isNullAt(i)) {
unsafeWriter(v, i)
} else {
writer.setNull1Bytes(i)
}
}
case ShortType =>
(v, i) => {
if (!v.isNullAt(i)) {
unsafeWriter(v, i)
} else {
writer.setNull2Bytes(i)
}
}
case IntegerType | DateType | FloatType =>
(v, i) => {
if (!v.isNullAt(i)) {
unsafeWriter(v, i)
} else {
writer.setNull4Bytes(i)
}
}
case _ =>
(v, i) => {
if (!v.isNullAt(i)) {
unsafeWriter(v, i)
} else {
writer.setNull8Bytes(i)
}
}
}
}
/**
* Get the number of bytes elements of a data type will occupy in the fixed part of an
* [[UnsafeArrayData]] object. Reference types are stored as an 8 byte combination of an
* offset (upper 4 bytes) and a length (lower 4 bytes), these point to the variable length
* portion of the array object. Primitives take up to 8 bytes, depending on the size of the
* underlying data type.
*/
private def getElementSize(dataType: DataType): Int = dataType match {
case NullType | StringType | BinaryType | CalendarIntervalType |
_: DecimalType | _: StructType | _: ArrayType | _: MapType => 8
case _ => dataType.defaultSize
}
/**
* Write an array to the buffer. If the array is already in serialized form (an instance of
* [[UnsafeArrayData]]) then we copy the bytes directly, otherwise we do an element-by-element
* copy.
*/
private def writeArray(
arrayWriter: UnsafeArrayWriter,
elementWriter: (SpecializedGetters, Int) => Unit,
array: ArrayData): Unit = array match {
case unsafe: UnsafeArrayData =>
arrayWriter.write(unsafe)
case _ =>
val numElements = array.numElements()
arrayWriter.initialize(numElements)
var i = 0
while (i < numElements) {
elementWriter.apply(array, i)
i += 1
}
}
}
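// Hedged usage sketch (not part of the original Spark sources): projecting a single bound
// int column. As the class docs note, the produced UnsafeRow is reused, so it is copied
// before being retained. The ordinal and literal value are illustrative assumptions.
private[expressions] object InterpretedUnsafeProjectionUsageSketch {
  def example(): UnsafeRow = {
    val projection = InterpretedUnsafeProjection.createProjection(
      Seq(BoundReference(0, IntegerType, nullable = false)))
    projection(InternalRow(42)).copy()
  }
}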
|
goldmedal/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/InterpretedUnsafeProjection.scala
|
Scala
|
apache-2.0
| 11,219 |
trait A
trait B
val AB: A with B { type a } = new A with B
// False
|
katejim/intellij-scala
|
testdata/typeConformance/compound/AWithBWithMissingType.scala
|
Scala
|
apache-2.0
| 68 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.spark
import java.io.ByteArrayInputStream
import java.nio.ByteBuffer
import java.sql.Timestamp
import java.util
import java.util.HashMap
import org.apache.avro.SchemaBuilder.BaseFieldTypeBuilder
import org.apache.avro.SchemaBuilder.BaseTypeBuilder
import org.apache.avro.SchemaBuilder.FieldAssembler
import org.apache.avro.SchemaBuilder.FieldDefault
import org.apache.avro.SchemaBuilder.RecordBuilder
import org.apache.avro.io._
import org.apache.commons.io.output.ByteArrayOutputStream
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.util.Bytes
import scala.collection.JavaConversions._
import org.apache.avro.{SchemaBuilder, Schema}
import org.apache.avro.Schema.Type._
import org.apache.avro.generic.GenericData.{Record, Fixed}
import org.apache.avro.generic.{GenericDatumReader, GenericDatumWriter, GenericData, GenericRecord}
import org.apache.spark.sql.Row
import org.apache.spark.sql.types._
import scala.collection.immutable.Map
@InterfaceAudience.Private
abstract class AvroException(msg: String) extends Exception(msg)
@InterfaceAudience.Private
case class SchemaConversionException(msg: String) extends AvroException(msg)
/***
 * At the top level, the converters provide three high-level interfaces:
 * 1. toSqlType: takes an avro schema and returns a sql schema.
 * 2. createConverterToSQL: returns a function that is used to convert avro types to their
 * corresponding sparkSQL representations.
 * 3. convertTypeToAvro: constructs a converter function for a given sparkSQL
 * datatype. This is used in writing Avro records out to disk.
*/
@InterfaceAudience.Private
object SchemaConverters {
case class SchemaType(dataType: DataType, nullable: Boolean)
/**
* This function takes an avro schema and returns a sql schema.
*/
def toSqlType(avroSchema: Schema): SchemaType = {
avroSchema.getType match {
case INT => SchemaType(IntegerType, nullable = false)
case STRING => SchemaType(StringType, nullable = false)
case BOOLEAN => SchemaType(BooleanType, nullable = false)
case BYTES => SchemaType(BinaryType, nullable = false)
case DOUBLE => SchemaType(DoubleType, nullable = false)
case FLOAT => SchemaType(FloatType, nullable = false)
case LONG => SchemaType(LongType, nullable = false)
case FIXED => SchemaType(BinaryType, nullable = false)
case ENUM => SchemaType(StringType, nullable = false)
case RECORD =>
val fields = avroSchema.getFields.map { f =>
val schemaType = toSqlType(f.schema())
StructField(f.name, schemaType.dataType, schemaType.nullable)
}
SchemaType(StructType(fields), nullable = false)
case ARRAY =>
val schemaType = toSqlType(avroSchema.getElementType)
SchemaType(
ArrayType(schemaType.dataType, containsNull = schemaType.nullable),
nullable = false)
case MAP =>
val schemaType = toSqlType(avroSchema.getValueType)
SchemaType(
MapType(StringType, schemaType.dataType, valueContainsNull = schemaType.nullable),
nullable = false)
case UNION =>
if (avroSchema.getTypes.exists(_.getType == NULL)) {
// In case of a union with null, eliminate it and make a recursive call
val remainingUnionTypes = avroSchema.getTypes.filterNot(_.getType == NULL)
if (remainingUnionTypes.size == 1) {
toSqlType(remainingUnionTypes.get(0)).copy(nullable = true)
} else {
toSqlType(Schema.createUnion(remainingUnionTypes)).copy(nullable = true)
}
} else avroSchema.getTypes.map(_.getType) match {
case Seq(t1, t2) if Set(t1, t2) == Set(INT, LONG) =>
SchemaType(LongType, nullable = false)
case Seq(t1, t2) if Set(t1, t2) == Set(FLOAT, DOUBLE) =>
SchemaType(DoubleType, nullable = false)
case other => throw new SchemaConversionException(
s"This mix of union types is not supported: $other")
}
case other => throw new SchemaConversionException(s"Unsupported type $other")
}
}
/**
* This function converts sparkSQL StructType into avro schema. This method uses two other
* converter methods in order to do the conversion.
*/
private def convertStructToAvro[T](
structType: StructType,
schemaBuilder: RecordBuilder[T],
recordNamespace: String): T = {
val fieldsAssembler: FieldAssembler[T] = schemaBuilder.fields()
structType.fields.foreach { field =>
val newField = fieldsAssembler.name(field.name).`type`()
if (field.nullable) {
convertFieldTypeToAvro(field.dataType, newField.nullable(), field.name, recordNamespace)
.noDefault
} else {
convertFieldTypeToAvro(field.dataType, newField, field.name, recordNamespace)
.noDefault
}
}
fieldsAssembler.endRecord()
}
/**
* Returns a function that is used to convert avro types to their
* corresponding sparkSQL representations.
*/
def createConverterToSQL(schema: Schema): Any => Any = {
schema.getType match {
// Avro strings are in Utf8, so we have to call toString on them
case STRING | ENUM => (item: Any) => if (item == null) null else item.toString
case INT | BOOLEAN | DOUBLE | FLOAT | LONG => identity
// Byte arrays are reused by avro, so we have to make a copy of them.
case FIXED => (item: Any) => if (item == null) {
null
} else {
item.asInstanceOf[Fixed].bytes().clone()
}
case BYTES => (item: Any) => if (item == null) {
null
} else {
val bytes = item.asInstanceOf[ByteBuffer]
val javaBytes = new Array[Byte](bytes.remaining)
bytes.get(javaBytes)
javaBytes
}
case RECORD =>
val fieldConverters = schema.getFields.map(f => createConverterToSQL(f.schema))
(item: Any) => if (item == null) {
null
} else {
val record = item.asInstanceOf[GenericRecord]
val converted = new Array[Any](fieldConverters.size)
var idx = 0
while (idx < fieldConverters.size) {
converted(idx) = fieldConverters.apply(idx)(record.get(idx))
idx += 1
}
Row.fromSeq(converted.toSeq)
}
case ARRAY =>
val elementConverter = createConverterToSQL(schema.getElementType)
(item: Any) => if (item == null) {
null
} else {
try {
item.asInstanceOf[GenericData.Array[Any]].map(elementConverter)
} catch {
case e: Throwable =>
item.asInstanceOf[util.ArrayList[Any]].map(elementConverter)
}
}
case MAP =>
val valueConverter = createConverterToSQL(schema.getValueType)
(item: Any) => if (item == null) {
null
} else {
item.asInstanceOf[HashMap[Any, Any]].map(x => (x._1.toString, valueConverter(x._2))).toMap
}
case UNION =>
if (schema.getTypes.exists(_.getType == NULL)) {
val remainingUnionTypes = schema.getTypes.filterNot(_.getType == NULL)
if (remainingUnionTypes.size == 1) {
createConverterToSQL(remainingUnionTypes.get(0))
} else {
createConverterToSQL(Schema.createUnion(remainingUnionTypes))
}
} else schema.getTypes.map(_.getType) match {
case Seq(t1, t2) if Set(t1, t2) == Set(INT, LONG) =>
(item: Any) => {
item match {
case l: Long => l
case i: Int => i.toLong
case null => null
}
}
case Seq(t1, t2) if Set(t1, t2) == Set(FLOAT, DOUBLE) =>
(item: Any) => {
item match {
case d: Double => d
case f: Float => f.toDouble
case null => null
}
}
case other => throw new SchemaConversionException(
s"This mix of union types is not supported (see README): $other")
}
case other => throw new SchemaConversionException(s"invalid avro type: $other")
}
}
/**
   * This function converts a sparkSQL type to an avro type. Note that it is not used to
   * construct fields of an avro record (convertFieldTypeToAvro is used for that).
*/
private def convertTypeToAvro[T](
dataType: DataType,
schemaBuilder: BaseTypeBuilder[T],
structName: String,
recordNamespace: String): T = {
dataType match {
case ByteType => schemaBuilder.intType()
case ShortType => schemaBuilder.intType()
case IntegerType => schemaBuilder.intType()
case LongType => schemaBuilder.longType()
case FloatType => schemaBuilder.floatType()
case DoubleType => schemaBuilder.doubleType()
case _: DecimalType => schemaBuilder.stringType()
case StringType => schemaBuilder.stringType()
case BinaryType => schemaBuilder.bytesType()
case BooleanType => schemaBuilder.booleanType()
case TimestampType => schemaBuilder.longType()
case ArrayType(elementType, _) =>
val builder = getSchemaBuilder(dataType.asInstanceOf[ArrayType].containsNull)
val elementSchema = convertTypeToAvro(elementType, builder, structName, recordNamespace)
schemaBuilder.array().items(elementSchema)
case MapType(StringType, valueType, _) =>
val builder = getSchemaBuilder(dataType.asInstanceOf[MapType].valueContainsNull)
val valueSchema = convertTypeToAvro(valueType, builder, structName, recordNamespace)
schemaBuilder.map().values(valueSchema)
case structType: StructType =>
convertStructToAvro(
structType,
schemaBuilder.record(structName).namespace(recordNamespace),
recordNamespace)
case other => throw new IllegalArgumentException(s"Unexpected type $dataType.")
}
}
/**
   * This function constructs fields of the avro record, where the schema of the field is
   * specified by the avro representation of dataType. Since builders for record fields are
   * different from those for everything else, we have to use a separate method.
*/
private def convertFieldTypeToAvro[T](
dataType: DataType,
newFieldBuilder: BaseFieldTypeBuilder[T],
structName: String,
recordNamespace: String): FieldDefault[T, _] = {
dataType match {
case ByteType => newFieldBuilder.intType()
case ShortType => newFieldBuilder.intType()
case IntegerType => newFieldBuilder.intType()
case LongType => newFieldBuilder.longType()
case FloatType => newFieldBuilder.floatType()
case DoubleType => newFieldBuilder.doubleType()
case _: DecimalType => newFieldBuilder.stringType()
case StringType => newFieldBuilder.stringType()
case BinaryType => newFieldBuilder.bytesType()
case BooleanType => newFieldBuilder.booleanType()
case TimestampType => newFieldBuilder.longType()
case ArrayType(elementType, _) =>
val builder = getSchemaBuilder(dataType.asInstanceOf[ArrayType].containsNull)
val elementSchema = convertTypeToAvro(elementType, builder, structName, recordNamespace)
newFieldBuilder.array().items(elementSchema)
case MapType(StringType, valueType, _) =>
val builder = getSchemaBuilder(dataType.asInstanceOf[MapType].valueContainsNull)
val valueSchema = convertTypeToAvro(valueType, builder, structName, recordNamespace)
newFieldBuilder.map().values(valueSchema)
case structType: StructType =>
convertStructToAvro(
structType,
newFieldBuilder.record(structName).namespace(recordNamespace),
recordNamespace)
case other => throw new IllegalArgumentException(s"Unexpected type $dataType.")
}
}
private def getSchemaBuilder(isNullable: Boolean): BaseTypeBuilder[Schema] = {
if (isNullable) {
SchemaBuilder.builder().nullable()
} else {
SchemaBuilder.builder()
}
}
/**
* This function constructs converter function for a given sparkSQL datatype. This is used in
* writing Avro records out to disk
*/
def createConverterToAvro(
dataType: DataType,
structName: String,
recordNamespace: String): (Any) => Any = {
dataType match {
case BinaryType => (item: Any) => item match {
case null => null
case bytes: Array[Byte] => ByteBuffer.wrap(bytes)
}
case ByteType | ShortType | IntegerType | LongType |
FloatType | DoubleType | StringType | BooleanType => identity
case _: DecimalType => (item: Any) => if (item == null) null else item.toString
case TimestampType => (item: Any) =>
if (item == null) null else item.asInstanceOf[Timestamp].getTime
case ArrayType(elementType, _) =>
val elementConverter = createConverterToAvro(elementType, structName, recordNamespace)
(item: Any) => {
if (item == null) {
null
} else {
val sourceArray = item.asInstanceOf[Seq[Any]]
val sourceArraySize = sourceArray.size
val targetArray = new util.ArrayList[Any](sourceArraySize)
var idx = 0
while (idx < sourceArraySize) {
targetArray.add(elementConverter(sourceArray(idx)))
idx += 1
}
targetArray
}
}
case MapType(StringType, valueType, _) =>
val valueConverter = createConverterToAvro(valueType, structName, recordNamespace)
(item: Any) => {
if (item == null) {
null
} else {
val javaMap = new HashMap[String, Any]()
item.asInstanceOf[Map[String, Any]].foreach { case (key, value) =>
javaMap.put(key, valueConverter(value))
}
javaMap
}
}
case structType: StructType =>
val builder = SchemaBuilder.record(structName).namespace(recordNamespace)
val schema: Schema = SchemaConverters.convertStructToAvro(
structType, builder, recordNamespace)
val fieldConverters = structType.fields.map(field =>
createConverterToAvro(field.dataType, field.name, recordNamespace))
(item: Any) => {
if (item == null) {
null
} else {
val record = new Record(schema)
val convertersIterator = fieldConverters.iterator
val fieldNamesIterator = dataType.asInstanceOf[StructType].fieldNames.iterator
val rowIterator = item.asInstanceOf[Row].toSeq.iterator
while (convertersIterator.hasNext) {
val converter = convertersIterator.next()
record.put(fieldNamesIterator.next(), converter(rowIterator.next()))
}
record
}
}
}
}
}
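// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file): exercises
// SchemaConverters.toSqlType, defined above, on a small hand-built Avro schema.
// The record and field names are made up for the example; it assumes the Avro
// and Spark SQL artifacts imported at the top of this file are on the classpath.
object SchemaConvertersUsageSketch {
  def main(args: Array[String]): Unit = {
    val avroSchema = SchemaBuilder.record("User").fields()
      .requiredString("name")   // non-nullable string
      .optionalInt("age")       // union(null, int) => nullable IntegerType
      .endRecord()
    val sqlSchema = SchemaConverters.toSqlType(avroSchema)
    // Expected: StructType(StructField(name,StringType,false), StructField(age,IntegerType,true))
    println(sqlSchema.dataType)
  }
}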
@InterfaceAudience.Private
object AvroSerdes {
  // For now we only handle the case where the top-level type is a record or a primitive type.
def serialize(input: Any, schema: Schema): Array[Byte]= {
schema.getType match {
case BOOLEAN => Bytes.toBytes(input.asInstanceOf[Boolean])
case BYTES | FIXED=> input.asInstanceOf[Array[Byte]]
case DOUBLE => Bytes.toBytes(input.asInstanceOf[Double])
case FLOAT => Bytes.toBytes(input.asInstanceOf[Float])
case INT => Bytes.toBytes(input.asInstanceOf[Int])
case LONG => Bytes.toBytes(input.asInstanceOf[Long])
case STRING => Bytes.toBytes(input.asInstanceOf[String])
case RECORD =>
val gr = input.asInstanceOf[GenericRecord]
val writer2 = new GenericDatumWriter[GenericRecord](schema)
val bao2 = new ByteArrayOutputStream()
val encoder2: BinaryEncoder = EncoderFactory.get().directBinaryEncoder(bao2, null)
writer2.write(gr, encoder2)
bao2.toByteArray()
case _ => throw new Exception(s"unsupported data type ${schema.getType}") //TODO
}
}
def deserialize(input: Array[Byte], schema: Schema): GenericRecord = {
val reader2: DatumReader[GenericRecord] = new GenericDatumReader[GenericRecord](schema)
val bai2 = new ByteArrayInputStream(input)
val decoder2: BinaryDecoder = DecoderFactory.get().directBinaryDecoder(bai2, null)
val gr2: GenericRecord = reader2.read(null, decoder2)
gr2
}
}
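// ---------------------------------------------------------------------------
// Illustrative round-trip sketch (not part of the original file): serializes a
// GenericRecord with AvroSerdes.serialize and reads it back with
// AvroSerdes.deserialize. The schema and field names are made up for the example.
object AvroSerdesRoundTripSketch {
  def main(args: Array[String]): Unit = {
    val schema = SchemaBuilder.record("Point").fields()
      .requiredInt("x")
      .requiredInt("y")
      .endRecord()
    val record = new GenericData.Record(schema)
    record.put("x", Int.box(1))
    record.put("y", Int.box(2))
    val bytes = AvroSerdes.serialize(record, schema)
    val roundTripped = AvroSerdes.deserialize(bytes, schema)
    println(roundTripped) // {"x": 1, "y": 2}
  }
}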
|
gustavoanatoly/hbase
|
hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/SchemaConverters.scala
|
Scala
|
apache-2.0
| 17,741 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx.compiler.codegenerator.opencl.hyperkernels
import cogx.compiler.codegenerator.opencl.fragments.{BigTensorAddressing, AddressingMode, HyperKernel}
import cogx.compiler.codegenerator.opencl.hyperkernels.fastfouriertransform.{Forward, WorkDimensions, FFT3DKernelCache}
import cogx.platform.types.{Opcode, VirtualFieldRegister, FieldType}
import cogx.cogmath.geometry.Shape
import cogx.compiler.codegenerator.common.FieldPolicies._
import cogx.compiler.parser.op.{FFT3DSubOpRI, FFTOpRI, FFTOp, FFT3DSubOp}
/** Performs one pass of a 3D FFT.
*
 * This kernel works on both ComplexField inputs and split real and imaginary
 * ScalarFields (and also vector versions of each). The opcode for the complex
 * field case is a UnaryOpcode, while it's a MultiOutputOpcode for the split
 * real/imaginary case. Although the two opcodes share many parameters, their
 * common base class is simply Opcode, so the code below extracts the shared
 * parameters from whichever opcode type is supplied.
*
* @author Dick Carter and Greg Snider
* @param in The virtual field registers of the input field to be 3D FFT'd.
 * @param opcode The opcode for this operation (FFT3DSubOp or FFT3DSubOpRI).
* @param resultTypes The FieldTypes of the result of this kernel.
* @param addressMode The addressing mode of this kernel.
* @param sourceCode The FFT-planner-generated source code.
* @return The synthesized hyperkernel.
*/
private[cogx]
class FFT3DHyperKernel private (in: Array[VirtualFieldRegister],
opcode: Opcode,
resultTypes: Array[FieldType],
addressMode: AddressingMode,
workDimensions: WorkDimensions,
sourceCode: String)
extends HyperKernel(opcode, in, resultTypes, addressMode) {
val fftPlanes = in(0).fieldType.tensorShape.points
// For an FFT operating on a VectorField, expand the 1D thread organization
// to 2D. The plane designator becomes "_row", not the usual "_tensorElement".
// The _tensorElement variable is not available in BigTensorAddressing mode,
// which this kernel operates in because each thread writes multiple output
// elements.
val workFieldShape =
if (fftPlanes == 1)
Shape(workDimensions.gWorkItems.toInt)
else
Shape(fftPlanes, workDimensions.gWorkItems.toInt)
val planeDesignator = if (fftPlanes == 1) "0" else "_row"
// The FFT runs with a 1D thread organization. We pick up the local and
// global workItem counts from the workDimensions object created by the
// FFT planner and set the kernel launch parameters to match. The kernel
// runs in BigTensorAddressing mode since each thread writes multiple output
// elements. Note: a 32 x 32 FFT was seen to run with 128 threads organized
// as 2 blocks of 64 threads each.
override lazy val workFieldType = new FieldType(workFieldShape, resultTypes(0).tensorShape, resultTypes(0).elementType)
override lazy val workGroup = HyperKernel.computeWorkGroupParameters(workFieldType, addressing, 1, workDimensions.lWorkItems.toInt)
val (splitRealImaginary, dir, scaleFactor) = opcode match {
case x: FFT3DSubOp => (false, x.dir, x.scaleFactor)
case y: FFT3DSubOpRI => (true, y.dir, y.scaleFactor)
case z => throw new RuntimeException(s"unexpected opcode $z, expecting FFTOp or FFTOpRI")
}
val realInput =
if (splitRealImaginary)
in.length == 1
else
!isComplexField(in(0).fieldType)
val realOutput =
if (splitRealImaginary)
resultTypes.length == 1
else
!isComplexField(resultTypes(0))
val scaling = if (scaleFactor == 1.0f) "" else scaleFactor.toString + "f * "
  /** Substitute concrete values for the %...% placeholders in the FFT kernel source. */
def postProcess(source: String) = source.
replaceAll("%dirVal%", dir.value.toString).
replaceAll("%dirName%", dir.name).
replaceAll("%batchSize%", workDimensions.batchSize.toString).
replaceAll("%realInput%", if (realInput) "1" else "0").
replaceAll("%realOutput%", if (realOutput) "1" else "0").
replaceAll("%splitRealImaginary%", if (splitRealImaginary) "1" else "0").
replaceAll("%plane%", planeDesignator).
replaceAll("%scalingMultiply%", scaling)
addCode(postProcess(sourceCode))
// debugCompile
}
/** Factory object for creating a chain of kernels to perform a 3D FFT
*/
private[cogx]
object FFT3DHyperKernel {
val DeviceMaxWorkItemsPerWorkGroup = 256
/** Create kernel DAG for 3D FFT.
*
* @param in The virtual field registers of the input field to be 3D FFT'd.
* @param opcode The opcode for this operation, with dir and dim info.
* @param resultTypes The FieldTypes of the result of this kernel.
* @return The synthesized hyperkernel.
*/
def apply(in: Array[VirtualFieldRegister], opcode: Opcode, resultTypes: Array[FieldType]):
HyperKernel =
{
val (splitRealImaginary, dir, scaleFactor, dimensions) = opcode match {
case x: FFTOp => (false, x.dir, x.scaleFactor, x.dimensions)
case y: FFTOpRI => (true, y.dir, y.scaleFactor, y.dimensions)
case z => throw new RuntimeException(s"unexpected opcode $z, expecting FFTOp or FFTOpRI")
}
val inType0 = in(0).fieldType
if (in.length > 1)
require(inType0 == in(1).fieldType)
if (resultTypes.length > 1)
require(resultTypes(0) == resultTypes(1))
require(inType0.dimensions == 3)
require(dimensions == 3)
// We can't always predict the resultType, since the inverse FFT may be
// either real or complex, depending on the original input.
if (splitRealImaginary)
require(isRealField(inType0) && inType0 == resultTypes(0))
else if (dir == Forward)
require(resultTypes(0) == toComplex(inType0))
else
require(toComplex(resultTypes(0)) == toComplex(inType0))
val fftLayers = inType0.fieldShape(0)
val fftRows = inType0.fieldShape(1)
val fftColumns = inType0.fieldShape(2)
var workDimensions = FFT3DKernelCache.workDimensions(fftLayers, fftRows, fftColumns,
DeviceMaxWorkItemsPerWorkGroup)
var sourceCodes = FFT3DKernelCache.sourceCodes(fftLayers, fftRows, fftColumns,
DeviceMaxWorkItemsPerWorkGroup)
var kernelNames = FFT3DKernelCache.kernelNames(fftLayers, fftRows, fftColumns,
DeviceMaxWorkItemsPerWorkGroup)
val passes = kernelNames.length
require(passes > 0, "Internal error: expecting FFT passes > 0")
def kernelChainUpToPass(pass: Int): HyperKernel = {
val input = if (pass == 0) in else kernelChainUpToPass(pass - 1).outputs.toArray
val resultFieldTypes =
if (pass == passes - 1)
resultTypes
else if (splitRealImaginary)
Array(inType0, inType0)
else
Array(toComplex(inType0))
// Apply requested scaling to last pass only
val scale = if (pass == passes - 1) scaleFactor else 1.0f
if (splitRealImaginary)
new FFT3DHyperKernel(input, FFT3DSubOpRI(pass, dir, scale), resultFieldTypes,
BigTensorAddressing, workDimensions(pass), sourceCodes(pass))
else
new FFT3DHyperKernel(input, FFT3DSubOp(pass, dir, scale), resultFieldTypes,
BigTensorAddressing, workDimensions(pass), sourceCodes(pass))
}
kernelChainUpToPass(passes - 1)
}
}
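// ---------------------------------------------------------------------------
// Illustrative, self-contained sketch (not part of the original file) of the
// placeholder substitution that postProcess performs on the planner-generated
// OpenCL source. The template string and the substituted values are made up;
// only the %name% substitution pattern mirrors the kernel above.
object KernelTemplateSubstitutionSketch {
  def main(args: Array[String]): Unit = {
    val template = "fftPass(dir = %dirVal%, batch = %batchSize%, out = %scalingMultiply%value);"
    val filled = template
      .replaceAll("%dirVal%", "-1")
      .replaceAll("%batchSize%", "4")
      .replaceAll("%scalingMultiply%", "0.25f * ")
    println(filled) // fftPass(dir = -1, batch = 4, out = 0.25f * value);
  }
}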
|
hpe-cct/cct-core
|
src/main/scala/cogx/compiler/codegenerator/opencl/hyperkernels/FFT3DHyperKernel.scala
|
Scala
|
apache-2.0
| 8,078 |